net/ipv6/tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	ip6_set_txhash(sk);

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	= tcp_rtx_synack,
	.send_ack	= tcp_v6_reqsk_send_ack,
	.destructor	= tcp_v6_reqsk_destructor,
	.send_reset	= tcp_v6_send_reset,
	.syn_ack_timeout = tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	= IPV6_MIN_MTU - sizeof(struct tcphdr) -
			  sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	= tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
#endif
	.init_req	= tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq = cookie_v6_init_sequence,
#endif
	.route_req	= tcp_v6_route_req,
	.init_seq	= tcp_v6_init_sequence,
	.send_synack	= tcp_v6_send_synack,
	.queue_hash_add = inet6_csk_reqsk_queue_hash_add,
};

static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr, int oif,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = inet6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
			     label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	= inet6_csk_xmit,
	.send_check	= tcp_v6_send_check,
	.rebuild_header	= inet6_sk_rebuild_header,
	.sk_rx_dst_set	= inet6_sk_rx_dst_set,
	.conn_request	= tcp_v6_conn_request,
	.syn_recv_sock	= tcp_v6_syn_recv_sock,
	.net_header_len	= sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
	.bind_conflict	= inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	= tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= tcp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.sk_rx_dst_set	= inet_sk_rx_dst_set,
	.conn_request	= tcp_v6_conn_request,
	.syn_recv_sock	= tcp_v6_syn_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
	.bind_conflict	= inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	= tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

952a10be 1665#ifdef CONFIG_PROC_FS
1da177e4 1666/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 1667static void get_openreq6(struct seq_file *seq,
a7cb5a49 1668 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1da177e4 1669{
1da177e4 1670 int ttd = req->expires - jiffies;
634fb979
ED
1671 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1672 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1da177e4
LT
1673
1674 if (ttd < 0)
1675 ttd = 0;
1676
1da177e4
LT
1677 seq_printf(seq,
1678 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
d14c5ab6 1679 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1da177e4
LT
1680 i,
1681 src->s6_addr32[0], src->s6_addr32[1],
1682 src->s6_addr32[2], src->s6_addr32[3],
b44084c2 1683 inet_rsk(req)->ir_num,
1da177e4
LT
1684 dest->s6_addr32[0], dest->s6_addr32[1],
1685 dest->s6_addr32[2], dest->s6_addr32[3],
634fb979 1686 ntohs(inet_rsk(req)->ir_rmt_port),
1da177e4 1687 TCP_SYN_RECV,
4c99aa40 1688 0, 0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1689 1, /* timers active (only the expire timer) */
1690 jiffies_to_clock_t(ttd),
e6c022a4 1691 req->num_timeout,
a7cb5a49 1692 from_kuid_munged(seq_user_ns(seq), uid),
1ab1457c 1693 0, /* non standard timer */
1da177e4
LT
1694 0, /* open_requests have no inode */
1695 0, req);
1696}
1697
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

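/* TIME_WAIT sockets carry much less state; they are reported with the
 * fixed timer code 3 and the remaining time-wait lifetime (delta), while
 * the retransmit, uid, timeout and inode columns are reported as zero.
 */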
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

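/* seq_file show routine: the start token prints the column header;
 * afterwards the iterator state (plus sk_state) tells us whether the
 * current element is a request sock, a TIME_WAIT sock or a full sock.
 */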
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

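/* Glue between the generic TCP seq_file iterator and /proc/net/tcp6:
 * tcp_seq_open() drives the walk over the shared TCP socket tables and
 * tcp6_seq_show() above renders each entry; tcp_proc_register() creates
 * the proc entry named after .name below.
 */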
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

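/* tcpv6_prot uses SLAB_DESTROY_BY_RCU (see below), so lockless lookups
 * may still dereference a socket while it is being recycled.  clear_sk
 * therefore wipes the object in two steps around the pinet6 pointer,
 * which has to stay valid for such concurrent readers.
 */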
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear the pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

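/* The proto definition that plugs TCPv6 into the core socket layer.
 * Most callbacks are shared with IPv4 TCP; only connect, init/destroy,
 * backlog_rcv, hash and clear_sk have IPv6-specific implementations here.
 */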
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

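/* Layer-4 protocol hooks registered for IPPROTO_TCP on the IPv6 input
 * path: tcp_v6_rcv() handles incoming segments, tcp_v6_err() handles
 * ICMPv6 errors, and tcp_v6_early_demux() lets the input path look up
 * established sockets early so their cached rx_dst can be reused.
 */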
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

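/* Per network namespace setup: create the kernel control socket
 * (net->ipv6.tcp_sk) used for transmitting packets that are not tied
 * to a full socket (e.g. RSTs), and purge IPv6 timewait sockets when a
 * batch of namespaces goes away.
 */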
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

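/* Module initialisation: register the L4 protocol handler first, then
 * the protosw entry so socket(2) can create TCPv6 sockets, and finally
 * the per-netns operations.  Errors unwind in reverse order.
 */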
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

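/* Teardown mirrors tcpv6_init() in reverse order. */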
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}