/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

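/* GRO receive handler for ESP-in-IPv4. Parses the SPI and sequence
 * number, attaches a secpath and looks up the xfrm state unless the
 * NIC already decrypted the packet (CRYPTO_DONE), then hands the skb
 * to xfrm_input(). Returning ERR_PTR(-EINPROGRESS) signals to the
 * GRO layer that the skb was consumed here.
 */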
static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	skb_pull(skb, offset);

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

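	/* Either the NIC decrypted the packet (CRYPTO_DONE) or the state
	 * lookup above succeeded; mark the skb so the xfrm layer knows
	 * it arrived through the GRO codepath.
	 */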
	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

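/* Prepend the outer ESP header for GSO output. The mac header offset
 * points at the outer IP protocol field here, so writing IPPROTO_ESP
 * through it retags the outer header; the inner protocol is saved in
 * xo->proto for use as the next-header value in the trailer.
 */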
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

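/* GSO segmentation for ESP. The inner payload is segmented first via
 * the outer mode's gso_segment callback; every resulting segment then
 * gets its own sequence number and runs through the mode and type
 * offload xmit hooks, which build the ESP header and trailer and,
 * without NETIF_F_HW_ESP, perform the software crypto fallback.
 */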
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	__u32 seq;
	int err = 0;
	struct sk_buff *skb2;
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		goto out;

	seq = xo->seq.low;

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		goto out;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

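	/* Without full ESP hardware offload the payload is encrypted in
	 * software after segmentation, so disable scatter-gather and
	 * checksum offload for the inner segmentation as well.
	 */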
	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	segs = x->outer_mode->gso_segment(x, skb, esp_features);
	if (IS_ERR_OR_NULL(segs))
		goto out;

	__skb_pull(skb, skb->data - skb_mac_header(skb));

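	/* Walk the segment list: assign each segment its own sequence
	 * number and push it through the ESP output hooks.
	 */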
	skb2 = segs;
	do {
		struct sk_buff *nskb = skb2->next;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_GSO_SEGMENT;
		xo->seq.low = seq;
		xo->seq.hi = xfrm_replay_seqhi(x, seq);

		if (!(features & NETIF_F_HW_ESP))
			xo->flags |= CRYPTO_FALLBACK;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (err) {
			kfree_skb_list(segs);
			return ERR_PTR(err);
		}

		if (!skb_is_gso(skb2))
			seq++;
		else
			seq += skb_shinfo(skb2)->gso_segs;

		skb_push(skb2, skb2->mac_len);
		skb2 = nskb;
	} while (skb2);

out:
	return segs;
}

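/* Finish ESP receive processing after the crypto step. For packets
 * that were not decrypted in hardware the checksum is invalidated,
 * then esp_input_done2() strips the ESP header and trailer.
 */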
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

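/* Transmit-side ESP handling for offloaded skbs. Builds the ESP
 * header, and either leaves the crypto to the device (hardware
 * offload) or performs the software encryption fallback inline.
 */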
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

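	/* Use the software fallback unless the device advertises ESP
	 * hardware offload, the state has an offload handle, and the
	 * skb actually leaves through the device the state was
	 * offloaded to.
	 */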
	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev)) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

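	/* ESP trailer sizing: the ciphertext length is the payload plus
	 * two bytes (pad length and next header), padded to the 4-byte
	 * aligned cipher block size; the full tail additionally carries
	 * the ICV of alen bytes.
	 */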
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

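	/* For GSO segments the per-segment sequence number comes from
	 * the offload context; otherwise the IP header length and
	 * checksum are refreshed, since adding the ESP trailer changed
	 * the packet length.
	 */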
	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(xo->seq.low);
	} else {
		ip_hdr(skb)->tot_len = htons(skb->len);
		ip_send_check(ip_hdr(skb));
	}

	if (hw_offload)
		return 0;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

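/* Registration glue: GRO/GSO callbacks for the inet ESP protocol
 * slot, plus the ESP-specific xfrm type offload operations.
 */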
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description = "ESP4 OFFLOAD",
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);