1 // SPDX-License-Identifier: GPL-2.0+
4 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
7 #include <net/tc_act/tc_gate.h>
10 #include "sparx5_tc.h"
12 #include "vcap_api_client.h"
14 #include "sparx5_main.h"
15 #include "sparx5_vcap_impl.h"
#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
/* One entry per rule size (subwords per item); when several port keysets of
 * the same size match a rule, value/mask accumulate a wildcarded type id.
 */
struct sparx5_wildcard_rule {
	enum vcap_keyfield_set keyset;

/* Per-size table of wildcard rules collected for one tc filter.
 * Indexed by the keyset's sw_per_item (see sparx5_tc_select_protocol_keyset).
 */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
/* State kept for a tc flower template installed on a chain; stored on the
 * port's tc_templates list and looked up by chain id (cid).
 */
struct sparx5_tc_flower_template {
	struct list_head list; /* for insertion in the list of templates */
	int cid; /* chain id */
	enum vcap_keyfield_set orig; /* keyset used before the template */
	enum vcap_keyfield_set keyset; /* new keyset used by template */
	u16 l3_proto; /* protocol specified in the template */
/* SparX-5 VCAP fragment types:
 * 0 = no fragment, 1 = initial fragment,
 * 2 = suspicious fragment, 3 = valid follow-up fragment
 */
/* Each constant packs the VCAP key in the high nibble and the mask in the
 * low nibble (extracted as value = (vdt >> 4) & 0x3 below).
 */
enum { /* key / mask */
	FRAG_NOT = 0x03, /* 0 / 3 */
	FRAG_SOME = 0x11, /* 1 / 1 */
	FRAG_FIRST = 0x13, /* 1 / 3 */
	FRAG_LATER = 0x33, /* 3 / 3 */
	FRAG_INVAL = 0xff, /* invalid */

/* Flower fragment flag to VCAP fragment type mapping */
/* Rows are indexed by (is_frag_key << 1 | is_frag_mask), columns by
 * (first_frag_key << 1 | first_frag_mask); FRAG_INVAL rejects the combo.
 */
static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
	{ FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
	{ FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
	{ FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
	/* 0/0 0/1 1/0 1/1 <-- first_frag */
/* Add an ES0 TPID-select key matching the vlan protocol of the filter.
 * NOTE(review): the selection logic between 8100/88A8 is not visible in this
 * listing — presumably keyed on st->tpid; confirm against the full source.
 */
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
		err = vcap_rule_add_key_u32(st->vrule,
					    SPX5_TPID_SEL_8100, ~0);
		err = vcap_rule_add_key_u32(st->vrule,
					    SPX5_TPID_SEL_88A8, ~0);
		/* Any other vlan proto is rejected via extack */
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Invalid vlan proto");
/* Translate the flower "basic" match (n_proto / ip_proto) into VCAP keys.
 * Known ethertypes map to IP4_IS/TCP_IS style bits; unknown ones match on
 * the raw VCAP_KF_ETYPE value. Marks FLOW_DISSECTOR_KEY_BASIC as consumed.
 */
sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
	struct flow_match_basic mt;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			/* Unknown ethertype: match on the raw value */
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
		} else if (st->l3_proto == ETH_P_IPV6) {
			/* IPv6: IP4_IS cleared; IS0 additionally gets an
			 * extra key (line elided in this listing)
			 */
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
			/* Other l4 protocols match on the proto number */
			err = vcap_rule_add_key_u32(st->vrule,

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_BASIC);

	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
/* Translate the flower "control" match (fragment flags) into the VCAP
 * L3_FRAGMENT_TYPE key via the sparx5_vcap_frag_map lookup table.
 * FRAG_INVAL verdicts are rejected with an extack message.
 */
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
	struct flow_match_control mt;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
		u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
		u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;

		u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
		u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
		u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;

		/* Lookup verdict based on the 2 + 2 input bits */
		u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];

		if (vdt == FRAG_INVAL) {
			NL_SET_ERR_MSG_MOD(st->fco->common.extack,
					   "Match on invalid fragment flag combination");

		/* Extract VCAP fragment key and mask from verdict */
		value = (vdt >> 4) & 0x3;

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);

	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
/* CVLAN matching is only available in IS0; otherwise reject, else defer to
 * the generic vcap_tc helper.
 */
sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");

	return vcap_tc_flower_handler_cvlan_usage(st);
/* VLAN matching: IS0 uses the outer-tag keys (VID0/PCP0), other VCAPs use
 * the classified keys (VID_CLS/PCP_CLS). In ES0, a tpid match additionally
 * adds the TPID-select key via sparx5_tc_flower_es0_tpid().
 */
sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;

	err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);

	if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
		err = sparx5_tc_flower_es0_tpid(st);
/* Dispatch table from flow dissector key id to parse handler; entries left
 * NULL are unsupported and skipped by sparx5_tc_use_dissectors().
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
/* Run every supported dissector handler over the filter and fail if the
 * filter used any key no handler consumed (used_keys mismatch).
 */
static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule)
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(st->frule, idx))
		if (!sparx5_tc_flower_usage_handlers[idx])
		err = sparx5_tc_flower_usage_handlers[idx](st);

	/* Any dissector key not claimed by a handler is unsupported */
	if (st->frule->match.dissector->used_keys ^ st->used_keys) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "Unsupported match item");
/* Validate the action list of a flower filter before building a rule:
 * - at least one action, basic hw stats only
 * - no duplicated action types
 * - a trailing goto must target a valid next lookup, otherwise the chain
 *   must be the last one
 * - reject known-bad action combinations (trap+accept, vlan push/pop/modify)
 */
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco,
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and pop action");

	if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan push and modify action");

	if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
	    action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine vlan pop and modify action");
/* Add a rule counter action */
/* ES0 counts via ESDX, ES2-style VCAPs via CNT_ID; the rule id doubles as
 * the counter id. Unsupported VCAP types log an error.
 */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
	switch (admin->vtype) {
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
		vcap_rule_set_counter_id(vrule, vrule->id);
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
		vcap_rule_set_counter_id(vrule, vrule->id);
		pr_err("%s:%d: vcap type: %d not supported\n",
		       __func__, __LINE__, admin->vtype);
/* Collect all port keysets and apply the first of them, possibly wildcarded */
/* Intersects the keysets usable by the rule with those configured on the
 * port. Matching keysets are grouped per rule size in "multi"; within a
 * group the type id value/mask are combined so one rule can match several
 * keysets (wildcarded type). The first group is applied to vrule here; the
 * remaining groups are added later by sparx5_tc_add_remaining_rules().
 */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    struct sparx5_multiple_rules *multi)
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (!vcap_rule_find_keysets(vrule, &matches))

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])

			/* Group by rule size (subwords per item) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
				mru->value &= kinfo->type_id;
				mru->mask |= kinfo->type_id;

	/* For ETH_P_ALL every port keyset must be covered */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];

		vcap_set_rule_set_keyset(vrule, mru->keyset);

		/* Some keysets do not have a type field */
		vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,

		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
/* Duplicate an existing rule with a different keyset/type-id so one tc
 * filter can cover several port keysets. The copy shares the cookie of the
 * original rule, drops port/lookup keys that would not fit the new keyset,
 * reuses the actionset, and gets its own counter.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
	struct vcap_rule *vrule;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       vcap_keyset_name(vctrl, rule->keyset),

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);

	err = sparx5_tc_add_rule_counter(admin, vrule);

	err = vcap_val_rule(vrule, ETH_P_ALL);
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);

	err = vcap_add_rule(vrule);
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);

	/* The copy is always freed locally; vcap_add_rule keeps its own */
	vcap_free_rule(vrule);
/* Add one rule copy per remaining selected entry in "multi" (all keyset
 * groups after the first, which was applied directly to the base rule).
 */
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
/* Add the actionset that is the default for the VCAP type */
/* IS0 gets CLASSIFICATION, the other supported types BASE_TYPE; an already
 * chosen actionset is never overwritten.
 */
static int sparx5_tc_set_actionset(struct vcap_admin *admin,
				   struct vcap_rule *vrule)
	enum vcap_actionfield_set aset;

	switch (admin->vtype) {
		aset = VCAP_AFS_CLASSIFICATION;
		aset = VCAP_AFS_BASE_TYPE;
		aset = VCAP_AFS_BASE_TYPE;
		pr_err("%s:%d: %s\n", __func__, __LINE__, "Invalid VCAP type");

	/* Do not overwrite any current actionset */
	if (vrule->actionset == VCAP_AFS_NO_VALUE)
		err = vcap_set_rule_set_actionset(vrule, aset);
/* Add the VCAP key to match on for a rule target value */
/* The link value is the chain offset within the lookup; how it is matched
 * depends on the VCAP: GEN_IDX for IS0-to-IS0 chaining, PAG when chained
 * from IS0 into IS2, and ISDX when chained from IS0 into ES0/ES2.
 */
static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
					  struct vcap_rule *vrule,
	int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;

	switch (admin->vtype) {
		/* Add NXT_IDX key for chaining rules between IS0 instances */
		err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
					    link_val, /* target */
		/* Add PAG key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
					    link_val, /* target */
		/* Add ISDX key for chaining rules from IS0 */
		return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
/* Add the VCAP action that adds a target value to a rule */
/* Counterpart of sparx5_tc_add_rule_link_target(): writes the chain-offset
 * value (G_IDX, PAG or ISDX depending on source/destination VCAP pair) that
 * the target rule matches on. Unsupported pairs are logged and rejected.
 */
static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
				   struct vcap_admin *admin,
				   struct vcap_rule *vrule,
				   int from_cid, int to_cid)
	struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);

		pr_err("%s:%d: unsupported chain direction: %d\n",
		       __func__, __LINE__, to_cid);

	diff = vcap_chain_offset(vctrl, from_cid, to_cid);

	if (admin->vtype == VCAP_TYPE_IS0 &&
	    to_admin->vtype == VCAP_TYPE_IS0) {
		/* Between IS0 instances the G_IDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   to_admin->vtype == VCAP_TYPE_IS2) {
		/* Between IS0 and IS2 the PAG value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PAG_OVERRIDE_MASK,
	} else if (admin->vtype == VCAP_TYPE_IS0 &&
		   (to_admin->vtype == VCAP_TYPE_ES0 ||
		    to_admin->vtype == VCAP_TYPE_ES2)) {
		/* Between IS0 and ES0/ES2 the ISDX value is used */
		err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_ISDX_ADD_REPLACE_SEL,
		pr_err("%s:%d: unsupported chain destination: %d\n",
		       __func__, __LINE__, to_cid);
/* Validate and copy a tc gate action into a PSFP stream gate config.
 * Rejects out-of-range priority, cycle times and entry counts via extack,
 * then copies per-entry state/interval/ipv/maxoctets.
 */
static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
					   struct flow_action_entry *act,
					   struct netlink_ext_ack *extack)
	if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");

	if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
	    act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");

	if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");

	if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");

	sg->gate_state = true;
	sg->ipv = act->gate.prio;
	sg->num_entries = act->gate.num_entries;
	sg->cycletime = act->gate.cycletime;
	sg->cycletimeext = act->gate.cycletimeext;

	for (i = 0; i < sg->num_entries; i++) {
		sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
		sg->gce[i].interval = act->gate.entries[i].interval;
		sg->gce[i].ipv = act->gate.entries[i].ipv;
		sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
/* Validate and copy a tc police action into a service policer config.
 * Rate is converted from bytes/s to kbit/s; only drop-on-exceed with
 * pipe/ok-on-conform semantics is offloadable.
 */
static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
					     struct flow_action_entry *act,
					     struct netlink_ext_ack *extack)
	pol->type = SPX5_POL_SERVICE;
	/* bytes/s -> kbit/s */
	pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
	pol->burst = act->police.burst;
	pol->idx = act->hw_index;

	/* rate is now in kbit */
	if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
/* Program the PSFP chain for a rule: stream gate, optional flow meter and
 * stream filter, then bind the resulting sfid/fmid to an ISDX and attach
 * ISDX actions to the rule so matching frames are classified to the stream.
 */
static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
				       struct vcap_rule *vrule, int sg_idx,
				       int pol_idx, struct sparx5_psfp_sg *sg,
				       struct sparx5_psfp_fm *fm,
				       struct sparx5_psfp_sf *sf)
	u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;

	/* Must always have a stream gate - max sdu (filter option) is evaluated
	 * after frames have passed the gate, so in case of only a policer, we
	 * allocate a stream gate that is always open.
	 */
		sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
		sg->ipv = 0; /* Disabled */
		sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gate_state = 1; /* Open */
		sg->gate_enabled = 1;
		sg->gce[0].gate_state = 1;
		sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
		sg->gce[0].maxoctets = 0; /* Disabled */

	ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);

	/* Add new flow-meter */
	ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);

	/* Map stream filter to stream gate */
	sf->sgid = psfp_sgid;

	/* Add new stream-filter and map it to a steam gate */
	ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);

	/* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
	sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);

	ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,

	ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
/* Handle the action trap for a VCAP rule */
/* Sends matching frames to the CPU (copy enable + queue 0); one VCAP type
 * additionally replaces the port mask and redirects forwarding to loopback.
 * VCAPs without trap support are rejected via extack.
 */
static int sparx5_tc_action_trap(struct vcap_admin *admin,
				 struct vcap_rule *vrule,
				 struct flow_cls_offload *fco)
	switch (admin->vtype) {
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		err = vcap_rule_add_action_u32(vrule,
					       SPX5_PMM_REPLACE_ALL);
		err = vcap_rule_add_action_u32(vrule,
					       SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
		err = vcap_rule_add_action_bit(vrule,
					       VCAP_AF_CPU_COPY_ENA,
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_CPU_QUEUE_NUM, 0);
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Trap action not supported in this VCAP");
/* Handle the vlan pop action: only supported in one VCAP type (presumably
 * ES0 — the case labels are elided in this listing); implemented by
 * programming the outer-tag push mode.
 */
static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
				     struct vcap_rule *vrule,
				     struct flow_cls_offload *fco,
	switch (admin->vtype) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN pop action not supported in this VCAP");

		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");
/* Handle the vlan modify action: rewrites the outer tag via the TAG_A
 * action fields — TPID select from the vlan proto (8100/88A8 only),
 * VID/PCP from the action, DEI from classification.
 */
static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
					struct vcap_rule *vrule,
					struct flow_cls_offload *fco,
					struct flow_action_entry *act,
	switch (admin->vtype) {
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_PUSH_OUTER_TAG,
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN modify action not supported in this VCAP");

		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
		err = vcap_rule_add_action_u32(vrule,
					       VCAP_AF_TAG_A_TPID_SEL,
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Invalid vlan proto");

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_VID_SEL,

	err = vcap_rule_add_action_u32(vrule,

	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_A_PCP_SEL,

	err = vcap_rule_add_action_u32(vrule,

	/* DEI is taken from the classified value */
	return vcap_rule_add_action_u32(vrule,
					VCAP_AF_TAG_A_DEI_SEL,
					SPX5_DEI_A_CLASSIFIED);
/* Handle the vlan push action: refuses to push on already double-tagged
 * frames (ETH_P_8021AD), sets up the new outer tag via the vlan-modify
 * helper, then pushes the classified tag as the inner (B) tag.
 */
static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
				      struct vcap_rule *vrule,
				      struct flow_cls_offload *fco,
				      struct flow_action_entry *act,
	u16 act_tpid = be16_to_cpu(act->vlan.proto);

	switch (admin->vtype) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "VLAN push action not supported in this VCAP");

	if (tpid == ETH_P_8021AD) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot push on double tagged frames");

	/* Outer tag fields are shared with the modify action */
	err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);

	/* Push classified tag as inner tag */
	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_PUSH_INNER_TAG,
				       SPX5_ITAG_PUSH_B_TAG);
	err = vcap_rule_add_action_u32(vrule,
				       VCAP_AF_TAG_B_TPID_SEL,
				       SPX5_TPID_B_CLASSIFIED);
	NL_SET_ERR_MSG_MOD(fco->common.extack,
			   "Invalid vlan proto");
/* Remove rule keys that may prevent templates from matching a keyset */
/* Per VCAP type, strips keys (ETYPE, IP_SNAP_IS, TCP_IS, IP4_IS) that are
 * implied by the keyset/protocol and would otherwise over-constrain the
 * keyset selection when a template is in use.
 */
static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
					   struct vcap_rule *vrule,
	switch (admin->vtype) {
		vcap_rule_rem_key(vrule, VCAP_KF_ETYPE);
		vcap_rule_rem_key(vrule, VCAP_KF_IP_SNAP_IS);
		if (vrule->keyset == VCAP_KFS_IP4_OTHER)
			vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
		if (vrule->keyset == VCAP_KFS_IP6_STD)
			vcap_rule_rem_key(vrule, VCAP_KF_TCP_IS);
		vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
		vcap_rule_rem_key(vrule, VCAP_KF_IP4_IS);
/* Apply a matching template (by chain id) to the rule: force the template's
 * keyset and drop implied keys. Returns whether a template was found, so
 * the caller can skip its own keyset selection.
 */
static bool sparx5_tc_flower_use_template(struct net_device *ndev,
					  struct flow_cls_offload *fco,
					  struct vcap_admin *admin,
					  struct vcap_rule *vrule)
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_tc_flower_template *ftp;

	list_for_each_entry(ftp, &port->tc_templates, list) {
		if (ftp->cid != fco->common.chain_index)

		vcap_set_rule_set_keyset(vrule, ftp->keyset);
		sparx5_tc_flower_simplify_rule(admin, vrule, ftp->l3_proto);
/* FLOW_CLS_REPLACE entry point: build and install a VCAP rule for a tc
 * flower filter. Steps: validate actions, allocate the rule, parse the
 * match keys, add counter and chain-link target key, translate each action,
 * optionally set up PSFP (gate/police), pick a keyset (template or port
 * based), validate and add the rule, then add extra keyset copies for
 * ETH_P_ALL filters.
 */
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
	struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
	struct netlink_ext_ack *extack = fco->common.extack;
	int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
	struct vcap_tc_flower_parse_usage state = {
		.l3_proto = ETH_P_ALL,
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_psfp_sg sg = { 0 };
	struct sparx5_psfp_fm fm = { 0 };
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
		return PTR_ERR(vrule);

	/* The cookie ties the VCAP rule(s) back to the tc filter */
	vrule->cookie = fco->cookie;

	state.vrule = vrule;
	state.frule = flow_cls_offload_flow_rule(fco);
	err = sparx5_tc_use_dissectors(&state, admin, vrule);

	err = sparx5_tc_add_rule_counter(admin, vrule);

	err = sparx5_tc_add_rule_link_target(admin, vrule,
					     fco->common.chain_index);

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		case FLOW_ACTION_GATE: {
			err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);

			tc_sg_idx = act->hw_index;

		case FLOW_ACTION_POLICE: {
			err = sparx5_tc_flower_parse_act_police(&fm.pol, act,

			tc_pol_idx = fm.pol.idx;
			sf.max_sdu = act->police.mtu;

		case FLOW_ACTION_TRAP:
			err = sparx5_tc_action_trap(admin, vrule, fco);

		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);

		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			sparx5_tc_add_rule_link(vctrl, admin, vrule,
						fco->common.chain_index,

		case FLOW_ACTION_VLAN_POP:
			err = sparx5_tc_action_vlan_pop(admin, vrule, fco,

		case FLOW_ACTION_VLAN_PUSH:
			err = sparx5_tc_action_vlan_push(admin, vrule, fco,

		case FLOW_ACTION_VLAN_MANGLE:
			err = sparx5_tc_action_vlan_modify(admin, vrule, fco,

			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");

	/* PSFP setup only when a gate and/or policer action was seen */
	if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
		err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
						  tc_pol_idx, &sg, &fm, &sf);

	if (!sparx5_tc_flower_use_template(ndev, fco, admin, vrule)) {
		err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
						       state.l3_proto, &multi);
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "No matching port keyset for filter protocol and keys");

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, state.l3_proto);
		vcap_set_tc_exterr(fco, vrule);

	err = vcap_add_rule(vrule);
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (state.l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,

	vcap_free_rule(vrule);
/* Release the PSFP resources (flow meter, stream gate, stream filter)
 * bound to a rule's ISDX action, then clear the ISDX configuration.
 * A rule without an ISDX_VAL action has nothing to free.
 */
static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
					  struct vcap_rule *vrule)
	struct vcap_client_actionfield *afield;
	u32 isdx, sfid, sgid, fmid;

	/* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
	 * it is used for stream and/or flow-meter classification.
	 */
	afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);

	isdx = afield->data.u32.value;
	sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);

	fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
	sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);

	if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
		pr_err("%s:%d Could not delete invalid fmid: %d", __func__,

	if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
		pr_err("%s:%d Could not delete invalid sgid: %d", __func__,

	if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
		pr_err("%s:%d Could not delete invalid sfid: %d", __func__,

	/* Unbind the ISDX from stream filter and flow meter */
	sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
/* Fetch a rule copy by id, free its attached PSFP resources, and release
 * the copy again (vcap_get_rule returns an allocated duplicate).
 */
static int sparx5_tc_free_rule_resources(struct net_device *ndev,
					 struct vcap_control *vctrl,
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	struct vcap_rule *vrule;

	vrule = vcap_get_rule(vctrl, rule_id);

	sparx5_tc_free_psfp_resources(sparx5, vrule);

	vcap_free_rule(vrule);
/* FLOW_CLS_DESTROY entry point: delete every VCAP rule sharing the filter's
 * cookie; PSFP resources are freed for the first rule only (they are
 * attached to the first of a set of rules).
 */
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
	struct sparx5_port *port = netdev_priv(ndev);
	int err = -ENOENT, count = 0, rule_id;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);

		/* Resources are attached to the first rule of
		 * a set of rules. Only works if the rules are
		 * in the correct order.
		 */
		err = sparx5_tc_free_rule_resources(ndev, vctrl,
			pr_err("%s:%d: could not free resources %d\n",
			       __func__, __LINE__, rule_id);

		err = vcap_del_rule(vctrl, ndev, rule_id);
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
/* FLOW_CLS_STATS entry point: read the rule counter by cookie and report it
 * to tc as an immediate hw-stats packet count (no byte count available).
 */
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);

	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
/* FLOW_CLS_TMPLT_CREATE entry point: parse the template's keys into a
 * throw-away rule, choose the smallest keyset that can hold them, switch
 * the port keyset for the chain accordingly (remembering the original for
 * restore on destroy), and store the template on the port list.
 * Not allowed on ES0 or on chains that already have filters.
 */
static int sparx5_tc_flower_template_create(struct net_device *ndev,
					    struct flow_cls_offload *fco,
					    struct vcap_admin *admin)
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_tc_flower_parse_usage state = {
		.l3_proto = ETH_P_ALL,
	struct sparx5_tc_flower_template *ftp;
	struct vcap_keyset_list kslist = {};
	enum vcap_keyfield_set keysets[10];
	struct vcap_control *vctrl;
	struct vcap_rule *vrule;

	if (admin->vtype == VCAP_TYPE_ES0) {
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "VCAP does not support templates");

	count = vcap_admin_rule_count(admin, fco->common.chain_index);
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "Filters are already present");

	ftp = kzalloc(sizeof(*ftp), GFP_KERNEL);

	ftp->cid = fco->common.chain_index;
	ftp->orig = VCAP_KFS_NO_VALUE;
	ftp->keyset = VCAP_KFS_NO_VALUE;

	vctrl = port->sparx5->vcap_ctrl;
	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index,
				VCAP_USER_TC, fco->common.prio, 0);
	if (IS_ERR(vrule)) {
		err = PTR_ERR(vrule);

	state.vrule = vrule;
	state.frule = flow_cls_offload_flow_rule(fco);
	err = sparx5_tc_use_dissectors(&state, admin, vrule);
		pr_err("%s:%d: key error: %d\n", __func__, __LINE__, err);

	ftp->l3_proto = state.l3_proto;

	sparx5_tc_flower_simplify_rule(admin, vrule, state.l3_proto);

	/* Find the keysets that the rule can use */
	kslist.keysets = keysets;
	kslist.max = ARRAY_SIZE(keysets);
	if (!vcap_rule_find_keysets(vrule, &kslist)) {
		pr_err("%s:%d: %s\n", __func__, __LINE__,
		       "Could not find a suitable keyset");

	/* Prefer the smallest keyset that can hold the template's keys */
	ftp->keyset = vcap_select_min_rule_keyset(vctrl, admin->vtype, &kslist);

	sparx5_vcap_set_port_keyset(ndev, admin, fco->common.chain_index,

	/* Remember the previous port keyset for restore on destroy */
	ftp->orig = kslist.keysets[0];

	/* Store new template */
	list_add_tail(&ftp->list, &port->tc_templates);
	vcap_free_rule(vrule);

	vcap_free_rule(vrule);
/* FLOW_CLS_TMPLT_DESTROY entry point: restore the port's original keyset
 * for the chain and remove the template from the port list.
 */
static int sparx5_tc_flower_template_destroy(struct net_device *ndev,
					     struct flow_cls_offload *fco,
					     struct vcap_admin *admin)
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_tc_flower_template *ftp, *tmp;

	/* Rules using the template are removed by the tc framework */
	list_for_each_entry_safe(ftp, tmp, &port->tc_templates, list) {
		if (ftp->cid != fco->common.chain_index)

		sparx5_vcap_set_port_keyset(ndev, admin,
					    fco->common.chain_index,
					    ftp->l3_proto, ftp->orig,
		list_del(&ftp->list);
1472 int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
1475 struct sparx5_port *port = netdev_priv(ndev);
1476 struct vcap_control *vctrl;
1477 struct vcap_admin *admin;
1480 /* Get vcap instance from the chain id */
1481 vctrl = port->sparx5->vcap_ctrl;
1482 admin = vcap_find_admin(vctrl, fco->common.chain_index);
1484 NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
1488 switch (fco->command) {
1489 case FLOW_CLS_REPLACE:
1490 return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
1491 case FLOW_CLS_DESTROY:
1492 return sparx5_tc_flower_destroy(ndev, fco, admin);
1493 case FLOW_CLS_STATS:
1494 return sparx5_tc_flower_stats(ndev, fco, admin);
1495 case FLOW_CLS_TMPLT_CREATE:
1496 return sparx5_tc_flower_template_create(ndev, fco, admin);
1497 case FLOW_CLS_TMPLT_DESTROY:
1498 return sparx5_tc_flower_template_destroy(ndev, fco, admin);