// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/gso.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
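/* Adjust an skb that is about to be ESP-offloaded in transport mode:
 * for GSO segments the transport header is rewound over the ESP header,
 * then skb->data is pulled past the transport offset plus header_len.
 */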
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}
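/* Tunnel-mode counterpart: recompute the transport header from the
 * inner headers for GSO segments, then pull the MAC header plus the
 * ESP header out of the data area.
 */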
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}
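/* BEET-mode variant: a non-IPv6 selector may carry a BEET pseudo
 * header (up to IPV4_BEET_PHMAXLEN bytes), which is subtracted from
 * header_len when pulling the outer headers.
 */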
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

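/* Returns true when assigning one ESP sequence number per GSO segment
 * would wrap the low 32 bits of the sequence counter, in which case
 * the skb must be segmented in software first.
 */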
static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	__u32 seq = xo->seq.low;

	seq += skb_shinfo(skb)->gso_segs;
	if (unlikely(seq < xo->seq.low))
		return true;

	return false;
}

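/* Called from the core TX path (validate_xmit_skb) for IPsec offload:
 * packets the device cannot handle as-is are segmented and/or have the
 * ESP transformation applied in software through the type_offload->xmit
 * hook before they reach ndo_start_xmit.
 */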
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		return skb;

	/* The packet was sent to the HW IPsec packet offload engine,
	 * but to the wrong device. Drop the packet, so it won't skip
	 * the XFRM stack.
	 */
	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
		kfree_skb(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NULL;
	}

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
				unlikely(xmit_xfrm_check_overflow(skb)))) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			dev_core_stats_tx_dropped_inc(dev);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

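/* Bind an xfrm_state to a device for crypto or packet offload, as
 * requested by userspace via struct xfrm_user_offload. For crypto
 * offload a missing device capability falls back to the software
 * path (0 is returned); packet offload is all-or-nothing and fails
 * with an error instead.
 */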
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_dev_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	bool is_packet_offload;

	if (!x->type_offload) {
		NL_SET_ERR_MSG(extack, "Type doesn't support offload");
		return -EINVAL;
	}

	if (xuo->flags &
	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) ||
	    (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) {
		NL_SET_ERR_MSG(extack, "Mismatched SA and offload direction");
		return -EINVAL;
	}

	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;

	/* We don't yet support UDP encapsulation or TFC padding. */
	if ((!is_packet_offload && x->encap) || x->tfcpad) {
		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return (is_packet_offload) ? -EINVAL : 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return (is_packet_offload) ? -EINVAL : 0;
	}

	if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN");
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;

	if (xuo->flags & XFRM_OFFLOAD_INBOUND)
		xso->dir = XFRM_DEV_OFFLOAD_IN;
	else
		xso->dir = XFRM_DEV_OFFLOAD_OUT;

	if (is_packet_offload)
		xso->type = XFRM_DEV_OFFLOAD_PACKET;
	else
		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack);
	if (err) {
		xso->dev = NULL;
		xso->dir = 0;
		xso->real_dev = NULL;
		netdev_put(dev, &xso->dev_tracker);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;

		/* The user explicitly requested packet offload mode and
		 * configured a policy in addition to the XFRM state. So be
		 * civil to users, and return an error instead of taking the
		 * fallback path.
		 *
		 * This WARN_ON() can be seen as documentation for driver
		 * authors not to return -EOPNOTSUPP in packet offload mode.
		 */
		WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
		if (err != -EOPNOTSUPP || is_packet_offload) {
			NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

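/* Bind an xfrm_policy to a device. Unlike states, policies can only
 * be offloaded in packet offload mode, so XFRM_OFFLOAD_PACKET is
 * mandatory here.
 */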
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack)
{
	struct xfrm_dev_offload *xdo = &xp->xdo;
	struct net_device *dev;
	int err;

	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
		/* We support only packet offload mode, which means the
		 * user must set the XFRM_OFFLOAD_PACKET bit.
		 */
		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
		return -EINVAL;
	}

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev)
		return -EINVAL;

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
		xdo->dev = NULL;
		dev_put(dev);
		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
		return -EINVAL;
	}

	xdo->dev = dev;
	netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
	xdo->real_dev = dev;
	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
	switch (dir) {
	case XFRM_POLICY_IN:
		xdo->dir = XFRM_DEV_OFFLOAD_IN;
		break;
	case XFRM_POLICY_OUT:
		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
		break;
	case XFRM_POLICY_FWD:
		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
		break;
	default:
		xdo->dev = NULL;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
		return -EINVAL;
	}

	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack);
	if (err) {
		xdo->dev = NULL;
		xdo->real_dev = NULL;
		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		xdo->dir = 0;
		netdev_put(dev, &xdo->dev_tracker);
		NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy");
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);

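/* Decide whether this skb may take the HW offload path: the state must
 * have a usable offload type and the packet must fit the cached MTU
 * (or be GSO and still fit after segmentation). The driver gets a
 * final veto via xdo_dev_offload_ok.
 */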
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload ||
	    (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
		return false;

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
	    ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	     !xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

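/* Resume transmission of an skb whose asynchronous crypto completed
 * (XFRM_DEV_RESUME was set): retry the hard transmit directly, and
 * queue to the per-CPU xfrm backlog if the TX queue is frozen,
 * stopped or busy.
 */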
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

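/* Drain the per-CPU xfrm backlog: splice it onto a private list under
 * the queue lock, then give each queued skb another transmit attempt
 * via xfrm_dev_resume().
 */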
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

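/* Sanity-check a device's advertised ESP features against the
 * xfrmdev_ops it actually implements; an inconsistent combination
 * rejects the netdev notifier event with NOTIFY_BAD.
 */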
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP) {
		xfrm_dev_state_flush(dev_net(dev), dev, true);
		xfrm_dev_policy_flush(dev_net(dev), dev, true);
	}

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_api_check(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_api_check(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}