// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

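/* GRO receive handler for ESP: parses the SPI, attaches the matching
 * xfrm state and feeds the packet to xfrm_input(). Returning
 * ERR_PTR(-EINPROGRESS) tells the GRO layer the skb has been consumed.
 */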
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

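        /* No completed hardware crypto for this skb: build a secpath and
         * look up the state for this SPI so xfrm_input() can process it.
         */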
        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

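/* Prepend the ESP header for GSO output: write the SPI and sequence
 * number, mark the outer protocol as ESP and stash the inner protocol
 * in the offload context for later use.
 */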
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

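/* Per outer-mode GSO segmentation helpers. */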
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

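/* BEET mode may carry an optional pseudo header (IPPROTO_BEETPH) and,
 * for IPv6 selectors, extension headers; both must be skipped before
 * the inner protocol's gso_segment callback can run.
 */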
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        if (proto == IPPROTO_IPV6)
                skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

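/* Main GSO callback for ESP: validate the offload state, strip the ESP
 * header and IV, trim the feature set to what can still be offloaded,
 * then segment according to the outer mode.
 */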
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

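        /* If the device cannot offload ESP for this skb, fall back to
         * software and drop the features the fallback path cannot honour.
         */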
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

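/* Finish ESP receive processing once decryption is complete; if the
 * crypto was not done by hardware, mark the checksum as unverified.
 */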
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

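/* Transmit-side ESP handler: prepare the ESP header and trailer, then
 * either hand the skb to the device for hardware offload or perform
 * the transformation in software as a fallback.
 */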
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

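        /* Pad the payload to the cipher block size (at least four bytes);
         * the extra two bytes hold the ESP trailer's pad length and next
         * header fields.
         */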
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

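/* Offload hooks: esp4_offload plugs into the inet GRO/GSO layer,
 * esp_type_offload into the xfrm layer.
 */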
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp_input_tail,
        .xmit = esp_xmit,
        .encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");