2 * xfrm_output.c - Common IPsec encapsulation code.
4 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #include <linux/errno.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/netfilter.h>
16 #include <linux/skbuff.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
22 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
/* Ensure the skb has enough headroom for the transform headers recorded on
 * its dst (plus the device link-layer reserve) and enough tailroom for the
 * device's needed_tailroom, expanding the head with pskb_expand_head() when
 * it does not.  Called with GFP_ATOMIC since we are on the xmit path.
 *
 * NOTE(review): the jump in the embedded line numbers suggests the
 * subtraction of the current skb_headroom() from nhead and the fast-path
 * "nothing to do" early return were elided from this extract — confirm
 * against the full source before relying on this body.
 */
24 static int xfrm_skb_check_space(struct sk_buff *skb)
26 struct dst_entry *dst = skb_dst(skb);
/* Headroom demanded by the stacked transforms + device hard header. */
27 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
/* Tailroom shortfall; negative means we already have enough. */
29 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
38 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
41 /* Children define the path of the packet through the
42 * Linux networking. Thus, destinations are stackable.
 *
 * skb_dst_pop() takes a reference on the child dst of the skb's current
 * dst (the next hop in the transform stack) and returns it; the caller
 * installs it with skb_dst_set().  NOTE(review): the release of the old
 * dst appears elided from this extract.
 */
45 static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
47 struct dst_entry *child = dst_clone(skb_dst(skb)->child);
/* Apply one (or, via the loop below, a chain of) xfrm transform(s) to skb.
 * For each state on the dst stack: guarantee head/tail room, run the outer
 * mode encapsulation, then under x->lock validate the state, check soft/hard
 * lifetime expiry, and advance the replay sequence counter before accounting
 * the packet against the state's lifetime counters.  The actual protocol
 * transform (ESP/AH encryption) may complete asynchronously, in which case
 * -EINPROGRESS is propagated and processing resumes in xfrm_output_resume().
 *
 * NOTE(review): this extract omits the enclosing do { } loop head, the
 * error/goto labels, and the resume-from-async entry point implied by the
 * @err parameter — the visible lines are the success-path skeleton only.
 */
53 static int xfrm_output_one(struct sk_buff *skb, int err)
55 struct dst_entry *dst = skb_dst(skb);
56 struct xfrm_state *x = dst->xfrm;
57 struct net *net = xs_net(x);
/* Grow the skb head if the transforms need more room. */
63 err = xfrm_skb_check_space(skb);
65 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
/* Outer mode (transport/tunnel) builds the outer header. */
69 err = x->outer_mode->output(x, skb);
71 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
/* State checks + replay counter update must be atomic w.r.t. the SA. */
75 spin_lock_bh(&x->lock);
77 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
78 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
/* Reject packets on states past their hard lifetime. */
83 err = xfrm_state_check_expire(x);
85 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
/* Bump the outbound sequence number; fails on counter overflow. */
89 err = x->repl->overflow(x, skb);
91 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
/* Byte accounting feeds the lifetime expiry checks above. */
95 x->curlft.bytes += skb->len;
98 spin_unlock_bh(&x->lock);
/* HW-offloaded packets only need the encap header; the device encrypts. */
102 if (xfrm_offload(skb)) {
103 x->type_offload->encap(x, skb);
/* Software path: ESP/AH transform; may go async (-EINPROGRESS). */
105 err = x->type->output(x, skb);
106 if (err == -EINPROGRESS)
112 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
/* Step to the next dst in the transform stack. */
116 dst = skb_dst_pop(skb);
118 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
122 skb_dst_set(skb, dst);
/* Keep transforming until a tunnel-mode state, which restarts the
 * stack via the routing/netfilter path in xfrm_output_resume(). */
124 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
/* NOTE(review): this unlock is on an elided error path (lock still held
 * when bailing out of the locked section above). */
129 spin_unlock_bh(&x->lock);
/* Drive the transform loop to completion, re-entered here after an async
 * crypto completion (@err carries the async result; callers pass 1 to
 * start).  After each successful transform: re-run the L3 local_out hook,
 * then either hand the packet to dst_output() when no xfrm states remain,
 * or traverse NF_INET_POST_ROUTING before the next transform pass.
 * An nf_hook()/local_out() return of 1 means "continue"; anything else
 * means the hook stole or freed the packet.
 */
136 int xfrm_output_resume(struct sk_buff *skb, int err)
138 struct net *net = xs_net(skb_dst(skb)->xfrm);
140 while (likely((err = xfrm_output_one(skb, err)) == 0)) {
/* Per-family local_out (fragmentation/mtu handling lives there). */
143 err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
144 if (unlikely(err != 1))
/* No more transforms: transmit via the final route. */
147 if (!skb_dst(skb)->xfrm)
148 return dst_output(net, skb->sk, skb);
/* More transforms pending: run POST_ROUTING, then loop again
 * through xfrm_output2 -> xfrm_output_resume. */
150 err = nf_hook(skb_dst(skb)->ops->family,
151 NF_INET_POST_ROUTING, net, skb->sk, skb,
152 NULL, skb_dst(skb)->dev, xfrm_output2);
153 if (unlikely(err != 1))
/* Async transform in flight; we will be re-entered on completion. */
157 if (err == -EINPROGRESS)
163 EXPORT_SYMBOL_GPL(xfrm_output_resume);
/* Netfilter okfn continuation: restart the transform loop after the
 * POST_ROUTING hook (or per-GSO-segment from xfrm_output_gso()).  The
 * literal 1 is the "no pending async error, keep going" sentinel that
 * xfrm_output_resume() expects. */
165 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
167 return xfrm_output_resume(skb, 1);
/* Software-segment a GSO skb and push each resulting segment through the
 * transform path individually.  The BUILD_BUG_ONs guarantee the IPv4/IPv6
 * control-block stash fits in the region skb_gso_segment() preserves.
 *
 * NOTE(review): the per-segment loop head, the original-skb consume, and
 * the error handling that frees the remaining segment list are elided from
 * this extract.
 */
170 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
172 struct sk_buff *segs;
174 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
175 BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
176 segs = skb_gso_segment(skb, 0);
179 return PTR_ERR(segs);
/* Detach the current segment before handing it down, so an error can
 * free the untouched remainder of the list. */
184 struct sk_buff *nskb = segs->next;
188 err = xfrm_output2(net, sk, segs);
/* On failure, drop every segment not yet transmitted. */
191 kfree_skb_list(nskb);
/* Entry point for locally generated IPsec output.  Decides between the
 * hardware-offload path (attach the state to a duplicated secpath, mark
 * GSO packets as SKB_GSO_ESP, and let the device encrypt) and the software
 * path (segment GSO packets, resolve CHECKSUM_PARTIAL, then run the
 * transform loop via xfrm_output2).
 *
 * NOTE(review): several branch bodies and the non-offload fallthrough are
 * elided in this extract; the grouping of the offload-path statements
 * below is inferred from the surviving lines.
 */
201 int xfrm_output(struct sock *sk, struct sk_buff *skb)
203 struct net *net = dev_net(skb_dst(skb)->dev);
204 struct xfrm_state *x = skb_dst(skb)->xfrm;
/* --- hardware offload path --- */
209 if (xfrm_dev_offload_ok(skb, x)) {
/* Record the state on the secpath so the driver can find the SA. */
212 sp = secpath_dup(skb->sp);
214 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
219 secpath_put(skb->sp);
223 sp->xvec[skb->sp->len++] = x;
/* Offloaded GSO: tag segments as ESP so segmentation happens late. */
226 if (skb_is_gso(skb)) {
227 skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
229 return xfrm_output2(net, sk, skb);
/* Device cannot checksum ESP TX: fall back below to resolve it. */
232 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
/* --- software path --- */
/* GSO packets must be segmented before software encryption. */
237 return xfrm_output_gso(net, sk, skb);
/* Finish deferred checksums; encryption would corrupt them later. */
239 if (skb->ip_summed == CHECKSUM_PARTIAL) {
240 err = skb_checksum_help(skb);
242 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
249 return xfrm_output2(net, sk, skb);
251 EXPORT_SYMBOL_GPL(xfrm_output);
/* Extract the inner-packet header fields needed by the transform.  When the
 * state's selector family is AF_UNSPEC (inter-family tunnel), derive the
 * inner mode from the dst's address family; otherwise use the state's own
 * inner mode.  Returns -EAFNOSUPPORT when no inner mode matches.
 */
253 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
255 struct xfrm_mode *inner_mode;
256 if (x->sel.family == AF_UNSPEC)
257 inner_mode = xfrm_ip2inner_mode(x,
258 xfrm_af2proto(skb_dst(skb)->ops->family));
260 inner_mode = x->inner_mode;
262 if (inner_mode == NULL)
263 return -EAFNOSUPPORT;
264 return inner_mode->afinfo->extract_output(x, skb);
266 EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
/* Report a local transmission error (typically a too-big packet, @mtu) back
 * to the originating socket, dispatching to the IPv4 or IPv6 afinfo handler
 * based on skb->protocol.
 *
 * NOTE(review): the proto assignments in each branch, the unknown-protocol
 * bail-out, and the afinfo NULL check appear elided from this extract.
 */
268 void xfrm_local_error(struct sk_buff *skb, int mtu)
271 struct xfrm_state_afinfo *afinfo;
273 if (skb->protocol == htons(ETH_P_IP))
275 else if (skb->protocol == htons(ETH_P_IPV6))
/* Per-family handler generates the ICMP(v6) error / socket error. */
280 afinfo = xfrm_state_get_afinfo(proto);
282 afinfo->local_error(skb, mtu);
285 EXPORT_SYMBOL_GPL(xfrm_local_error);