/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>

/*
 *      Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr;
int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	nf_reset(newskb);
	netif_rx(newskb);
	return 0;
}

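/* Pick the TTL for an outgoing unicast packet: a per-socket IP_TTL
 * setting (inet->uc_ttl >= 0) wins; otherwise fall back to the route's
 * RTAX_HOPLIMIT metric, which is typically sysctl_ip_default_ttl
 * unless overridden per route.
 */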
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

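/* Final hop of the output path: attach the link-layer header and hand
 * the skb to the device layer, either via the destination's cached
 * hardware header (hh) or by asking the neighbour subsystem to resolve
 * and emit it.
 */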
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

#ifdef CONFIG_BRIDGE_NETFILTER
	/* bridge-netfilter defers calling some IP hooks to the bridge layer
	 * and still needs the conntrack reference.
	 */
	if (skb->nf_bridge == NULL)
#endif
		nf_reset(skb);

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

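/* Last common step of the output path: stamp the outgoing device and
 * protocol, then traverse the NF_IP_POST_ROUTING hook on the way to
 * ip_finish_output2().
 */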
int ip_finish_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
		       ip_finish_output2);
}

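/* Output path for multicast (and, via the RTCF_BROADCAST branch,
 * broadcast) packets: loop back a clone for local listeners where
 * required, honour TTL 0 as "host only", then fragment or send.
 */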
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; ip_mr_input would drop
		   them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	if (skb->len > dst_mtu(&rt->u.dst))
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}

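/* Standard unicast output: fragment if the packet exceeds the path MTU
 * and is not a TSO super-packet, otherwise send it as is.
 */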
int ip_output(struct sk_buff *skb)
{
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->tso_size)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}

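/* Queue a packet for transmission on a connected socket: route it
 * (reusing and revalidating the cached socket route when possible),
 * build the IP header around the transport payload and pass it to the
 * NF_IP_LOCAL_OUT hook.  ipfragok is set by callers (e.g. SCTP) that
 * can tolerate local fragmentation even when the socket would
 * normally set DF.
 */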
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until the route
			 * appears or the connection times itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

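/* Propagate per-packet metadata from the original skb to a freshly
 * built fragment: routing, scheduling and netfilter state must match,
 * or the fragments would be treated differently from the original.
 */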
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	to->nfcache = from->nfcache;
	/* Connection association is same as pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each carrying the IP header plus a block of the
 *	original datagram's data) so that each piece fits into a single
 *	device frame, and queue such frames for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	int not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one, which is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK.  Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

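/*
 *	Slow path: copy the payload piece by piece into newly allocated
 *	skbs, each carrying a copy of the IP header and an 8-byte-aligned
 *	block of data.
 */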
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each
		 *		   fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

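/* Generic getfrag callback for ip_append_data(): copy user iovec data
 * into the skb, computing the checksum on the fly unless the hardware
 * (CHECKSUM_HW) will do it for us.
 */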
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

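/* Checksum a page fragment through a temporary kernel mapping. */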
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket until
 *	ip_push_pending_frames() is called.  Each piece can be a page or
 *	non-page data.
 *
 *	Not only UDP but other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs;
	 * each of the segments is an IP fragment ready for sending to the
	 * network once the appropriate IP header has been added.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

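/* Illustrative sketch (not in-tree code): a datagram transport such as
 * udp_sendmsg() uses the corking interface roughly like this, assuming
 * the route "rt" and the "ipc" cookie have already been set up:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *			     len, sizeof(struct udphdr), &ipc, rt,
 *			     corkreq ? msg->msg_flags|MSG_MORE
 *				     : msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corkreq)
 *		err = ip_push_pending_frames(sk);
 *	release_sock(sk);
 *
 * Error handling is simplified; see udp_sendmsg() for the real thing.
 */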
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;

	while (size > 0) {
		int i;

		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				skb_trim(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	int df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how the
	 * transforms change the size of the packet, it will still go out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	if (!df) {
		__ip_select_ident(iph, &rt->u.dst, 0);
	} else {
		iph->id = htons(inet->id++);
	}
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.
	 *
	 * This chunk is not reenterable, hence the spinlock.  Note that it
	 * relies on the fact that this function is called with BHs locally
	 * disabled and that sk cannot already be spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

/*
 *	IP protocol layer initialiser
 */

static struct packet_type ip_packet_type = {
	.type = __constant_htons(ETH_P_IP),
	.func = ip_rcv,
};

/*
 *	IP registers the packet type and then calls the subprotocol initialisers
 */

void __init ip_init(void)
{
	dev_add_pack(&ip_packet_type);

	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_finish_output);
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_ip_default_ttl);
#endif