/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.34 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 *				for datagram xmit
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>

static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

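/*
 * Choose the Fragment header identification for an outgoing packet.
 * A single global counter is shared by all flows; it is protected by
 * ip6_id_lock and never hands out zero.
 */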
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static DEFINE_SPINLOCK(ip6_id_lock);

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = htonl(ipv6_fragmentation_id);
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

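/*
 * Final step of transmission: hand the packet to the neighbour layer,
 * preferring the cached hardware header if one exists.  Packets with
 * no usable neighbour entry are counted as OUTNOROUTES and dropped.
 */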
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;

}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

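/*
 * Deliver a locally generated packet to the device.  Multicast packets
 * are looped back to local listeners when required (mc_loop) and are
 * accounted separately before the packet passes the POST_ROUTING hook.
 */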
static int ip6_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
		struct inet6_dev *idev = ip6_dst_idev(skb->dst);

		if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev, ip6_output_finish);
}

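/*
 * dst output routine for IPv6: fragment when the packet exceeds the
 * path MTU (and is not GSO) or when the route demands fragmentation
 * (dst_allfrag), otherwise send it out directly via ip6_output2().
 */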
int ip6_output(struct sk_buff *skb)
{
	if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb->dst))
		return ip6_fragment(skb, ip6_output2);
	else
		return ip6_output2(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt, int ipfragok)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us)
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(__be32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

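/*
 * Deliver a packet carrying a Router Alert option to every raw socket
 * that registered for this RA value (sel).  All but the last matching
 * socket receive a clone; returns 1 if the packet was consumed.
 */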
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

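/*
 * Decide what to do with a packet that arrived for an address we proxy
 * neighbour discovery for: 1 means hand it to local input (unicast NDISC
 * messages), -1 means reject (link-local destination), 0 means forward.
 */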
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so if the packet contains
	 *	AH/ESP, we cannot do anything.
	 *	Defragmentation would also be a mistake; RA packets
	 *	cannot be fragmented, because there is no guarantee
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (ipv6_devconf.proxy_ndp &&
	    pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto error;
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}

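/*
 * Propagate per-packet metadata (device, dst, priority, mark, conntrack
 * and friends) from the original skb to a freshly built fragment.
 */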
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(to->nfct_reasm);
	to->nfct_reasm = from->nfct_reasm;
	nf_conntrack_get_reasm(to->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}

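/*
 * Walk the per-fragment extension headers (hop-by-hop, routing,
 * destination options) to find where the Fragment header has to be
 * inserted.  Returns the offset of that point and leaves *nexthdr
 * pointing at the nexthdr field to be rewritten.
 */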
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1);
	unsigned int packet_len = skb->tail - skb->nh.raw;
	int found_rhdr = 0;
	*nexthdr = &skb->nh.ipv6h->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#ifdef CONFIG_IPV6_MIP6
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
	}

	return offset;
}
EXPORT_SYMBOL_GPL(ip6_find_1stfragopt);

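/*
 * Fragment an over-sized packet.  The fast path reuses an existing,
 * suitably aligned frag_list (each entry becomes one fragment); the
 * slow path allocates fresh skbs and copies the payload block by block.
 */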
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst);
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));

		dst_hold(&rt->u.dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->u.dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->u.dst);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(ip6_dst_idev(skb->dst),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(ip6_dst_idev(skb->dst),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}

static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account that
	 * we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;

	if (*dst == NULL)
		*dst = ip6_route_output(sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

	return 0;

out_err_release:
	dst_release(*dst);
	*dst = NULL;
	return err;
}

/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

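/*
 * Illustrative sketch (not part of the original file): a typical caller
 * fills in a flowi and lets ip6_dst_lookup() resolve the route before
 * building its packet, e.g.
 *
 *	struct flowi fl = { .proto = IPPROTO_UDP,
 *			    .oif   = sk->sk_bound_dev_if };
 *	struct dst_entry *dst;
 *	int err;
 *
 *	ipv6_addr_copy(&fl.fl6_dst, daddr);
 *	err = ip6_dst_lookup(sk, &dst, &fl);
 *	if (err)
 *		return err;
 *	...
 *	dst_release(dst);
 */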
/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

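/*
 * UDP fragmentation offload helper: build (or extend) a single large
 * skb covering the whole datagram and record the per-fragment payload
 * size in gso_size, so the device or the GSO code splits it later.
 */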
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)

{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP large send offload,
	 * so create one single skb packet containing the complete
	 * UDP datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb->nh.raw = skb->data;

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
					    sizeof(struct frag_hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(skb, &fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow normal path
	 */
	kfree_skb(skb);

	return err;
}

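/*
 * Queue data for later transmission by ip6_push_pending_frames().  The
 * first call on an empty write queue sets up the cork state (options,
 * route, MTU); data is then appended to sk_write_queue in fragment-sized
 * chunks, using page frags when the device supports scatter/gather.
 */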
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = dst_mtu(rt->u.dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
					  fragheaderlen, transhdrlen, mtu,
					  flags);
		if (err)
			goto error;
		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}

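/*
 * Turn everything queued by ip6_append_data() into one packet: chain
 * the queued skbs onto the first one's frag_list, push any extension
 * headers and the IPv6 header, then hand the result to the LOCAL_OUT
 * netfilter hook / dst_output().  The cork state is cleared afterwards.
 */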
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(__be32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = np->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}

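/*
 * Drop any data still sitting on sk_write_queue (counted as
 * OUTDISCARDS) and reset the cork state without transmitting.
 */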
void ip6_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	inet->cork.flags &= ~IPCORK_OPT;

	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}