net: use skb_sec_path helper in more places
net/ipv6/esp6_offload.c
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

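/* Return the offset of the nexthdr field that announces ESP: either the
 * nexthdr field of the IPv6 header itself, or that of the last extension
 * header preceding the ESP header. For example, for a packet carrying a
 * single hop-by-hop header followed by ESP, this returns
 * sizeof(struct ipv6hdr). Returns 0 if no ESP header is found within the
 * first nhlen bytes.
 */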
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}

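/* GRO receive handler for ESP. Looks up the xfrm state for the packet's
 * SPI, attaches it to the skb's secure path and hands the packet to
 * xfrm_input() with XFRM_GRO set. On success it returns
 * ERR_PTR(-EINPROGRESS) so the GRO layer stops processing the skb;
 * otherwise the skb is flagged for flushing as a normal packet.
 */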
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

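/* Build the outer ESP header in front of the payload for GSO: fill in the
 * SPI and the low order bits of the output sequence number, and record the
 * inner protocol in the offload state for later use by the xmit path.
 */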
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

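/* GSO segmentation handler for ESP. Validates the offload state, strips
 * the ESP header and IV, and masks out checksum/scatter-gather features
 * when the device cannot offload this packet, before delegating the actual
 * segmentation to the outer mode's gso_segment() callback.
 */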
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

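/* Tail of ESP input processing for the offload path; the remaining trailer
 * and header handling is done by esp6_input_done2().
 */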
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

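/* Transmit handler for offloaded ESP. Builds the ESP header, trailer space
 * and sequence numbers; when the device cannot do the encryption itself,
 * CRYPTO_FALLBACK is set and the software transform in esp6_output_tail()
 * is used instead.
 */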
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload)
		return 0;

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

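/* GRO/GSO callbacks registered for IPPROTO_ESP and the xfrm type offload
 * hooks used by the ESP receive/transmit offload paths above.
 */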
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);