// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
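
/* Limits on what can be offloaded: only the TCP flags and control flags
 * below, and only the dissector keys in the whitelist masks that follow,
 * are accepted for hardware offload.
 */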
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)
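
/* Accumulates the match/set coverage of a flow as named fields while
 * aliasing the same bytes as a bitmap (vals), so merge validation can
 * compare two flows with a single bitmap_andnot() call.
 */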
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};
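
/* Send a flow rule to the firmware: the control message carries the rule
 * metadata followed by the unmasked key, the mask and the action list,
 * with lengths converted to firmware long words for the duration of the
 * transfer.
 */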
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size, bool ipv6,
			  struct netlink_ext_ack *extack)
{
	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
		return -EOPNOTSUPP;
	}

	if (enc_opts->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type, bool ipv6,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;

		if (ipv6) {
			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
			*key_size += sizeof(struct nfp_flower_ext_meta);
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

		if (ipv6) {
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		if (!enc_op)
			break;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
						ipv6, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}
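
/* Walk the TC dissector keys and translate them into the firmware's
 * key-layer bitmap and total key size, rejecting anything the loaded
 * firmware cannot match on.
 */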
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_cls_offload *flow,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_ipv6_addrs ipv6_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;
		bool ipv6_tun = false;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}

		ipv6_tun = enc_ctl.key->addr_type ==
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
			return -EOPNOTSUPP;
		}

		if (!ipv6_tun &&
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
			return -EOPNOTSUPP;
		}

		if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
				return -EOPNOTSUPP;
			}
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}

			*tun_type = NFP_FL_TUNNEL_GRE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GRE;

			if (ipv6_tun) {
				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
				key_size +=
					sizeof(struct nfp_flower_ipv6_udp_tun);
			} else {
				key_size +=
					sizeof(struct nfp_flower_ipv4_udp_tun);
			}

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, ipv6_tun,
							    extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in the either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include a IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->nfp_tun_ipv6 = NULL;
	flow_pay->meta.flags = 0;
	INIT_LIST_HEAD(&flow_pay->linked_flows);
	flow_pay->in_hw = false;
	flow_pay->pre_tun_rule.dev = NULL;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	bool ipv6_tun = false;
	u8 act_id = 0;
	u8 *ports;
	int i;

	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			if (ipv6_tun)
				memset(&merge->ipv6, 0xff,
				       sizeof(struct nfp_flower_ipv6));
			else
				memset(&merge->ipv4, 0xff,
				       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			pre_tun = (struct nfp_fl_pre_tunnel *)a;
			ipv6_tun = be16_to_cpu(pre_tun->flags) &
					NFP_FL_PRE_TUN_IPV6;
			break;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}

static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
		     struct nfp_fl_payload *sub_flow2)
{
	/* Two flows can be merged if sub_flow2 only matches on bits that are
	 * either matched by sub_flow1 or set by a sub_flow1 action. This
	 * ensures that every packet that hits sub_flow1 and recirculates is
	 * guaranteed to hit sub_flow2.
	 */
	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
	int err, act_out = 0;
	u8 last_act_id = 0;

	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
					      true);
	if (err)
		return err;

	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
					      false);
	if (err)
		return err;

	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
						   &last_act_id, &act_out);
	if (err)
		return err;

	/* Must only be 1 output action and it must be the last in sequence. */
	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
		return -EOPNOTSUPP;

	/* Reject merge if sub_flow2 matches on something that is not matched
	 * on or set in an action by sub_flow1.
	 */
	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
			    sub_flow1_merge.vals,
			    sizeof(struct nfp_flower_merge_check) * 8);
	if (err)
		return -EINVAL;

	return 0;
}
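
/* Pre-actions (PRE_TUNNEL/PRE_LAG) must sit at the head of an action
 * list; copy them to the destination and return the number of bytes
 * consumed so the remaining actions can be appended after them.
 */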
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
			    bool *tunnel_act)
{
	unsigned int act_off = 0, act_len;
	struct nfp_fl_act_head *a;
	u8 act_id = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&act_src[act_off];
		act_len = a->len_lw << NFP_FL_LW_SIZ;
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			if (tunnel_act)
				*tunnel_act = true;
			fallthrough;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
			memcpy(act_dst + act_off, act_src + act_off, act_len);
			break;
		default:
			return act_off;
		}

		act_off += act_len;
	}

	return act_off;
}

static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
			*vlan = (struct nfp_fl_push_vlan *)a;
		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
			return -EOPNOTSUPP;

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Ensure any VLAN push also has an egress action. */
	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
		return -EOPNOTSUPP;

	return 0;
}

static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
	struct nfp_fl_set_tun *tun;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;

	while (act_off < len) {
		a = (struct nfp_fl_act_head *)&acts[act_off];

		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
			tun = (struct nfp_fl_set_tun *)a;
			tun->outer_vlan_tpid = vlan->vlan_tpid;
			tun->outer_vlan_tci = vlan->vlan_tci;

			return 0;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	/* Return error if no tunnel action is found. */
	return -EOPNOTSUPP;
}
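
/* Build the merged action list: pre-actions from both sub_flows first,
 * then sub_flow1's actions with its final output removed, then
 * sub_flow2's actions. A VLAN push that follows a tunnel set is folded
 * into the tunnel action, as the firmware pushes the tunnel header on
 * egress.
 */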
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
			struct nfp_fl_payload *sub_flow2,
			struct nfp_fl_payload *merge_flow)
{
	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
	bool tunnel_act = false;
	char *merge_act;
	int err;

	/* The last action of sub_flow1 must be output - do not merge this. */
	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
	sub2_act_len = sub_flow2->meta.act_len;

	if (!sub2_act_len)
		return -EINVAL;

	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
		return -EINVAL;

	/* A shortcut can only be applied if there is a single action. */
	if (sub1_act_len)
		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
	else
		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
	merge_act = merge_flow->action_data;

	/* Copy any pre-actions to the start of merge flow action list. */
	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow1->action_data,
					       sub1_act_len, &tunnel_act);
	merge_act += pre_off1;
	sub1_act_len -= pre_off1;
	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
					       sub_flow2->action_data,
					       sub2_act_len, NULL);
	merge_act += pre_off2;
	sub2_act_len -= pre_off2;

	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
	 * valid merge.
	 */
	if (tunnel_act) {
		char *post_tun_acts = &sub_flow2->action_data[pre_off2];

		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
						  &post_tun_push_vlan);
		if (err)
			return err;

		if (post_tun_push_vlan) {
			pre_off2 += sizeof(*post_tun_push_vlan);
			sub2_act_len -= sizeof(*post_tun_push_vlan);
		}
	}

	/* Copy remaining actions from sub_flows 1 and 2. */
	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

	if (post_tun_push_vlan) {
		/* Update tunnel action in merge to include VLAN push. */
		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
						 post_tun_push_vlan);
		if (err)
			return err;

		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
	}

	merge_act += sub1_act_len;
	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

	return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
	list_del(&link->merge_flow.list);
	list_del(&link->sub_flow.list);
	kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
				    struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
		if (link->sub_flow.flow == sub_flow) {
			nfp_flower_unlink_flow(link);
			return;
		}
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
				 struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->merge_flow.flow = merge_flow;
	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
	link->sub_flow.flow = sub_flow;
	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

	return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:	Pointer to the APP handle
 * @sub_flow1:	Initial flow matched to produce merge hint
 * @sub_flow2:	Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) to a single flow, removing the initial from hw
 * and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 in success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
				     struct nfp_fl_payload *sub_flow1,
				     struct nfp_fl_payload *sub_flow2)
{
	struct flow_cls_offload merge_tc_off;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *merge_flow;
	struct nfp_fl_key_ls merge_key_ls;
	int err;

	ASSERT_RTNL();

	extack = merge_tc_off.common.extack;
	if (sub_flow1 == sub_flow2 ||
	    nfp_flower_is_merge_flow(sub_flow1) ||
	    nfp_flower_is_merge_flow(sub_flow2))
		return -EINVAL;

	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
	if (err)
		return err;

	merge_key_ls.key_size = sub_flow1->meta.key_len;

	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
	if (!merge_flow)
		return -ENOMEM;

	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
	merge_flow->ingress_dev = sub_flow1->ingress_dev;

	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
	       sub_flow1->meta.key_len);
	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
	       sub_flow1->meta.mask_len);

	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow1);
	if (err)
		goto err_destroy_merge_flow;

	err = nfp_flower_link_flows(merge_flow, sub_flow2);
	if (err)
		goto err_unlink_sub_flow1;

	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
					merge_flow->ingress_dev, extack);
	if (err)
		goto err_unlink_sub_flow2;

	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, merge_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
	if (err)
		goto err_remove_rhash;

	merge_flow->in_hw = true;
	sub_flow1->in_hw = false;

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
	nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
	nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	kfree(merge_flow);
	return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app:	Pointer to the APP handle
 * @flow:	Pointer to NFP flow representation of rule
 * @extack:	Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
				 struct nfp_fl_payload *flow,
				 struct netlink_ext_ack *extack)
{
	struct nfp_flower_meta_tci *meta_tci;
	struct nfp_flower_mac_mpls *mac;
	struct nfp_fl_act_head *act;
	u8 *mask = flow->mask_data;
	bool vlan = false;
	int act_offset;
	u8 key_layer;

	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
	if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
		u16 vlan_tci = be16_to_cpu(meta_tci->tci);

		vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
		flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
		vlan = true;
	} else {
		flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
	}

	key_layer = meta_tci->nfp_flow_key_layer;
	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
		return -EOPNOTSUPP;
	}

	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
		return -EOPNOTSUPP;
	}

	/* Skip fields known to exist. */
	mask += sizeof(struct nfp_flower_meta_tci);
	mask += sizeof(struct nfp_flower_in_port);

	/* Ensure destination MAC address is fully matched. */
	mac = (struct nfp_flower_mac_mpls *)mask;
	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
		return -EOPNOTSUPP;
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
	    key_layer & NFP_FLOWER_LAYER_IPV6) {
		/* Flags and proto fields have same offset in IPv4 and IPv6. */
		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
		int size;
		int i;

		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
			sizeof(struct nfp_flower_ipv4) :
			sizeof(struct nfp_flower_ipv6);

		mask += sizeof(struct nfp_flower_mac_mpls);

		/* Ensure proto and flags are the only IP layer fields. */
		for (i = 0; i < size; i++)
			if (mask[i] && i != ip_flags && i != ip_proto) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
				return -EOPNOTSUPP;
			}
	}

	/* Action must be a single egress or pop_vlan and egress. */
	act_offset = 0;
	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	if (vlan) {
		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
			return -EOPNOTSUPP;
		}

		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
	}

	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
		return -EOPNOTSUPP;
	}

	act_offset += act->len_lw << NFP_FL_LW_SIZ;

	/* Ensure there are no more actions after egress. */
	if (act_offset != flow->meta.act_len) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
		return -EOPNOTSUPP;
	}

	return 0;
}
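
/* Illustrative example (not from the driver): a classifier such as
 *
 *   tc filter add dev $REPR ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 skip_sw action drop
 *
 * arrives here as a FLOW_CLS_REPLACE command and is compiled into key
 * layers, match data and an action list before being sent to firmware.
 */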
/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type, extack);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type, extack);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
	if (err)
		goto err_destroy_flow;

	if (flow_pay->pre_tun_rule.dev) {
		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
		if (err)
			goto err_destroy_flow;
	}

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
		goto err_release_metadata;
	}

	if (flow_pay->pre_tun_rule.dev)
		err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
	else
		err = nfp_flower_xmit_flow(app, flow_pay,
					   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	flow_pay->in_hw = true;

	/* Deallocate flow payload when flower rule has been destroyed. */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
			     struct nfp_fl_payload *del_sub_flow,
			     struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link, *temp;
	struct nfp_fl_payload *origin;
	bool mod = false;
	int err;

	link = list_first_entry(&merge_flow->linked_flows,
				struct nfp_fl_payload_link, merge_flow.list);
	origin = link->sub_flow.flow;

	/* Re-add rule the merge had overwritten if it has not been deleted. */
	if (origin != del_sub_flow)
		mod = true;

	err = nfp_modify_flow_metadata(app, merge_flow);
	if (err) {
		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
		goto err_free_links;
	}

	if (!mod) {
		err = nfp_flower_xmit_flow(app, merge_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
		if (err) {
			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
			goto err_free_links;
		}
	} else {
		__nfp_modify_flow_metadata(priv, origin);
		err = nfp_flower_xmit_flow(app, origin,
					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
		if (err)
			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
		origin->in_hw = true;
	}

err_free_links:
	/* Clean any links connected with the merged flow. */
	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
				 merge_flow.list)
		nfp_flower_unlink_flow(link);

	kfree(merge_flow->action_data);
	kfree(merge_flow->mask_data);
	kfree(merge_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &merge_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
				  struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link, *temp;

	/* Remove any merge flow formed from the deleted sub_flow. */
	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
				 sub_flow.list)
		nfp_flower_remove_merge_flow(app, sub_flow,
					     link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (nfp_flow->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	if (nfp_flow->pre_tun_rule.dev)
		err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
	else
		err = nfp_flower_xmit_flow(app, nfp_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	/* Fall through on error. */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}
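
/* Merge flows own the hardware stats; fan the accumulated counters out
 * to every sub_flow in the merge so TC sees them on the original rules.
 */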
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	Netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct flow_cls_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case FLOW_CLS_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case FLOW_CLS_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	case TC_SETUP_CLSMATCHALL:
		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
						    type_data);
	default:
		return -EOPNOTSUPP;
	}
}
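
/* One driver-wide list tracks all bound flow block callbacks so that
 * bind requests for a block that is already bound can be refused via
 * flow_block_cb_is_busy().
 */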
static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
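
/* Indirect block offload: rules installed on netdevs the driver does not
 * own (e.g. tunnel devices) are received through the callbacks below and
 * offloaded against the NFP where possible.
 */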
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct flow_cls_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

	list_del(&priv->list);
	kfree(priv);
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct flow_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	struct flow_block_cb *block_cb;

	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	     nfp_flower_internal_port_can_offload(app, netdev)))
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (cb_priv &&
		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
					  cb_priv,
					  &nfp_block_cb_list))
			return -EBUSY;

		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
					       cb_priv, cb_priv,
					       nfp_flower_setup_indr_tc_release);
		if (IS_ERR(block_cb)) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_indr_block_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __flow_indr_block_cb_register(netdev, app,
						    nfp_flower_indr_setup_tc_cb,
						    app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__flow_indr_block_cb_unregister(netdev,
						nfp_flower_indr_setup_tc_cb,
						app);
	}

	return NOTIFY_OK;
}