// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

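/* Rebuild the VLAN ethernet header in @veth from the mac header at @mac_off,
 * re-inserting the tag that the driver stored out of band in skb metadata.
 */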
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add the vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

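/* Compute the offset of the inner header for encapsulated traffic and cache
 * it in pkt->inneroff: past the UDP, TCP or GRE header for those protocols,
 * or at the transport offset itself for IPIP.
 */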
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	case IPPROTO_GRE: {
		u32 offset = sizeof(struct gre_base_hdr);
		struct gre_base_hdr *gre, _gre;
		__be16 version;

		gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
		if (!gre)
			return -1;

		version = gre->flags & GRE_VERSION;
		switch (version) {
		case GRE_VERSION_0:
			if (gre->flags & GRE_ROUTING)
				return -1;

			if (gre->flags & GRE_CSUM) {
				offset += sizeof_field(struct gre_full_hdr, csum) +
					  sizeof_field(struct gre_full_hdr, reserved1);
			}
			if (gre->flags & GRE_KEY)
				offset += sizeof_field(struct gre_full_hdr, key);

			if (gre->flags & GRE_SEQ)
				offset += sizeof_field(struct gre_full_hdr, seq);
			break;
		default:
			return -1;
		}

		pkt->inneroff = thoff + offset;
		}
		break;
	case IPPROTO_IPIP:
		pkt->inneroff = thoff;
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

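/* Load priv->len bytes, starting at priv->offset from the selected header
 * base, into the destination register; terminate rule evaluation with
 * NFT_BREAK if the requested bytes are not available.
 */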
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_LEN]		= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb,
			    const struct nft_expr *expr, bool reset)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

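/* Register tracking: if the tracked register already holds the result of an
 * identical payload load, a later duplicate load can be elided; otherwise
 * record this expression as the new selector for the register.
 */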
static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}

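/* Build the offload match mask: all-ones when the match covers the whole
 * field, a prefix mask when it only covers the leading bytes of the field.
 */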
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);
	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

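/* Translate link layer payload matches into flow dissector keys for hardware
 * offload; offsets that do not map onto a supported field return -EOPNOTSUPP.
 */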
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

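/* Offload entry point: dispatch on the payload base to the link layer,
 * network or transport header translation above.
 */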
static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

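/* Like nft_payload_eval(), but for payload expressions nested inside an inner
 * expression: the header offsets come from the parsed tunnel context.
 */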
void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
			    const struct nft_pktinfo *pkt,
			    struct nft_inner_tun_ctx *tun_ctx)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_TUN_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
			goto err;

		offset = tun_ctx->inner_tunoff;
		break;
	case NFT_PAYLOAD_LL_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
			goto err;

		offset = tun_ctx->inner_lloff;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
			goto err;

		offset = tun_ctx->inner_nhoff;
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
			goto err;

		offset = tun_ctx->inner_thoff;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_inner_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);
	u32 base;

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_TUN_HEADER:
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->base   = base;
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static const struct nft_expr_ops nft_payload_inner_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.init		= nft_payload_inner_init,
	.dump		= nft_payload_dump,
	/* direct call to nft_payload_inner_eval(). */
};

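/* Incrementally update the 16-bit checksum *sum: fold out the contribution of
 * the old bytes (fsum) and fold in that of the new bytes (tsum), mapping a
 * zero result to CSUM_MANGLED_0.
 */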
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

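/* SCTP uses a CRC32c rather than an inet checksum, so recompute it over the
 * packet after the payload has been mangled.
 */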
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine the layer 4 checksum offset or this packet
	 * doesn't require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary number of bytes, based on the
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

struct nft_payload_set {
	enum nft_payload_bases	base:8;
	u8			offset;
	u8			len;
	u8			sreg;
	u8			csum_type;
	u8			csum_offset;
	u8			csum_flags;
};

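/* Payload mangling: write the source register into the packet at the selected
 * base + offset, fixing up the inet or SCTP checksum and, when
 * NFT_PAYLOAD_L4CSUM_PSEUDOHDR is set, the layer 4 checksum as well.
 */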
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (pkt->fragoff == 0 &&
		    nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);
	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
	int err;

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
					  &csum_offset);
		if (err < 0)
			return err;

		priv->csum_offset = csum_offset;
	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}
	priv->csum_type = csum_type;

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb,
				const struct nft_expr *expr, bool reset)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		__nft_reg_track_cancel(track, i);
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};

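/* Pick the expression ops: the set variant when a source register is given,
 * the fast variant for small, aligned loads from the network or transport
 * header, and the generic variant for everything else.
 */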
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;
	int err;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
	if (err < 0)
		return ERR_PTR(err);

	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
	if (err < 0)
		return ERR_PTR(err);

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.inner_ops	= &nft_payload_inner_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};