1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
5 #include "prestera_acl.h"
6 #include "prestera_flow.h"
7 #include "prestera_flower.h"
/* Per-chain template bookkeeping: holds the reference to the ACL ruleset
 * created when a TC chain template was installed, linked on the flow
 * block's template_list.
 * NOTE(review): this view is elided — a chain_index member is used by
 * tmplt_create/tmplt_destroy below but is not visible here; confirm
 * against the full source.
 */
9 struct prestera_flower_template {
10 struct prestera_acl_ruleset *ruleset;
11 struct list_head list;
/* Release one template: drop the ruleset reference taken at template
 * create time and unlink the entry from the block's template list.
 * NOTE(review): the kfree() of @template is presumably in the elided
 * tail of this function — confirm against the full source.
 */
16 prestera_flower_template_free(struct prestera_flower_template *template)
18 prestera_acl_ruleset_put(template->ruleset);
19 list_del(&template->list);
/* Tear down every chain template kept on @block (e.g. on block unbind):
 * each entry's ruleset reference is put and the entry is freed.
 * Safe iteration is required because entries are deleted while walking.
 */
23 void prestera_flower_template_cleanup(struct prestera_flow_block *block)
25 struct prestera_flower_template *template, *tmp;
27 /* put the reference to all rulesets kept in tmpl create */
28 list_for_each_entry_safe(template, tmp, &block->template_list, list)
29 prestera_flower_template_free(template);
/* Parse a FLOW_ACTION_GOTO entry into rule->re_arg.jump.
 *
 * Constraints enforced here: the target chain must be strictly greater
 * than the current one (only forward jumps), and a rule may carry at
 * most one jump (re_arg.jump.valid checked before setting).
 * The destination chain's ruleset is looked up/created via
 * prestera_acl_ruleset_get(); its hardware index becomes the jump
 * target. The reference is stashed in rule->jump_ruleset — presumably
 * put when the rule is destroyed (not visible in this view).
 * NOTE(review): chunk is elided — the chain_index parameter and the
 * error-return statements are not visible here.
 */
33 prestera_flower_parse_goto_action(struct prestera_flow_block *block,
34 struct prestera_acl_rule *rule,
36 const struct flow_action_entry *act)
38 struct prestera_acl_ruleset *ruleset;
40 if (act->chain_index <= chain_index)
41 /* we can jump only forward */
44 if (rule->re_arg.jump.valid)
47 ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
50 return PTR_ERR(ruleset);
52 rule->re_arg.jump.valid = 1;
53 rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
55 rule->jump_ruleset = ruleset;
/* Translate the TC flow_action list into the driver's rule->re_arg
 * representation.
 *
 * rule->re_arg is expected to be zero-initialized by the caller (see
 * comment below); each supported action is recorded at most once — a
 * duplicate (re_arg.<act>.valid already set) is rejected in the elided
 * branches. Supported actions visible here: accept, drop, trap,
 * police, goto. Anything else hits the default branch and fails with
 * an extack message.
 *
 * HW stats handling: only DISABLED (no counter) or DELAYED (counter
 * attached, mapped to a counter client by chain index) are accepted;
 * other stats types are rejected via extack.
 * NOTE(review): the switch(...) header, break/return statements and
 * several closing braces are elided from this view.
 */
60 static int prestera_flower_parse_actions(struct prestera_flow_block *block,
61 struct prestera_acl_rule *rule,
62 struct flow_action *flow_action,
64 struct netlink_ext_ack *extack)
66 const struct flow_action_entry *act;
69 /* whole struct (rule->re_arg) must be initialized with 0 */
70 if (!flow_action_has_entries(flow_action))
73 if (!flow_action_mixed_hw_stats_check(flow_action, extack))
/* All entries share one hw_stats type after the mixed check, so the
 * first entry is representative for the whole action list.
 */
76 act = flow_action_first_entry_get(flow_action);
77 if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
79 } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
80 /* setup counter first */
81 rule->re_arg.count.valid = true;
82 err = prestera_acl_chain_to_client(chain_index,
83 &rule->re_arg.count.client);
87 NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
91 flow_action_for_each(i, act, flow_action) {
93 case FLOW_ACTION_ACCEPT:
94 if (rule->re_arg.accept.valid)
97 rule->re_arg.accept.valid = 1;
99 case FLOW_ACTION_DROP:
100 if (rule->re_arg.drop.valid)
103 rule->re_arg.drop.valid = 1;
105 case FLOW_ACTION_TRAP:
106 if (rule->re_arg.trap.valid)
109 rule->re_arg.trap.valid = 1;
111 case FLOW_ACTION_POLICE:
112 if (rule->re_arg.police.valid)
115 rule->re_arg.police.valid = 1;
116 rule->re_arg.police.rate =
117 act->police.rate_bytes_ps;
118 rule->re_arg.police.burst = act->police.burst;
119 rule->re_arg.police.ingress = true;
121 case FLOW_ACTION_GOTO:
122 err = prestera_flower_parse_goto_action(block, rule,
129 NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
130 pr_err("Unsupported action\n");
/* Parse FLOW_DISSECTOR_KEY_META (match on ingress netdev) into the
 * hardware SYS_PORT/SYS_DEV match fields of the rule key.
 *
 * Rejected via extack: a partial ifindex mask (only exact-match
 * 0xFFFFFFFF is supported), an ifindex that resolves to no netdev, and
 * a netdev that does not belong to this driver
 * (prestera_netdev_check()).
 */
138 static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
139 struct flow_cls_offload *f,
140 struct prestera_flow_block *block)
141 { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
142 struct prestera_acl_match *r_match = &rule->re_key.match;
143 struct prestera_port *port;
144 struct net_device *ingress_dev;
145 struct flow_match_meta match;
148 flow_rule_match_meta(f_rule, &match);
149 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
150 NL_SET_ERR_MSG_MOD(f->common.extack,
151 "Unsupported ingress ifindex mask");
155 ingress_dev = __dev_get_by_index(block->net,
156 match.key->ingress_ifindex);
158 NL_SET_ERR_MSG_MOD(f->common.extack,
159 "Can't find specified ingress port to match on");
163 if (!prestera_netdev_check(ingress_dev)) {
164 NL_SET_ERR_MSG_MOD(f->common.extack,
165 "Can't match on switchdev ingress port")
168 port = netdev_priv(ingress_dev);
/* Port number occupies the upper bits of the SYS_PORT field, hence the
 * << 3 shift applied to both key and mask.
 */
170 mask = htons(0x1FFF << 3);
171 key = htons(port->hw_id << 3);
172 rule_match_set(r_match->key, SYS_PORT, key);
173 rule_match_set(r_match->mask, SYS_PORT, mask);
/* NOTE(review): the SYS_DEV mask assignment appears elided between
 * these lines in this view — as shown, SYS_DEV would reuse the
 * SYS_PORT mask; confirm against the full source.
 */
176 key = htons(port->dev_id);
177 rule_match_set(r_match->key, SYS_DEV, key);
178 rule_match_set(r_match->mask, SYS_DEV, mask);
/* Translate a flow_cls_offload request into the driver's ACL rule:
 * validates the dissector key set, fills rule->re_key.match from each
 * present match key, sets the rule priority from the TC prio, and
 * finally parses the action list via prestera_flower_parse_actions().
 *
 * Any dissector key outside the whitelist below fails with an extack
 * message before anything is written into the rule.
 * NOTE(review): several declarations (err, addr_type, ip_proto) and
 * return/brace lines are elided from this view.
 */
184 static int prestera_flower_parse(struct prestera_flow_block *block,
185 struct prestera_acl_rule *rule,
186 struct flow_cls_offload *f)
187 { struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
188 struct flow_dissector *dissector = f_rule->match.dissector;
189 struct prestera_acl_match *r_match = &rule->re_key.match;
190 __be16 n_proto_mask = 0;
191 __be16 n_proto_key = 0;
196 if (dissector->used_keys &
197 ~(BIT(FLOW_DISSECTOR_KEY_META) |
198 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
199 BIT(FLOW_DISSECTOR_KEY_BASIC) |
200 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
201 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
202 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
203 BIT(FLOW_DISSECTOR_KEY_ICMP) |
204 BIT(FLOW_DISSECTOR_KEY_PORTS) |
205 BIT(FLOW_DISSECTOR_KEY_VLAN))) {
206 NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
210 prestera_acl_rule_priority_set(rule, f->common.prio);
212 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
213 err = prestera_flower_parse_meta(rule, f, block);
218 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
219 struct flow_match_control match;
221 flow_rule_match_control(f_rule, &match);
/* addr_type steers the IPV4/IPV6 address parsing below. */
222 addr_type = match.key->addr_type;
225 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
226 struct flow_match_basic match;
228 flow_rule_match_basic(f_rule, &match);
229 n_proto_key = match.key->n_proto;
230 n_proto_mask = match.mask->n_proto;
/* ETH_P_ALL means "any protocol" — presumably key/mask are zeroed in
 * the elided branch body so no ETH_TYPE constraint is programmed.
 */
232 if (ntohs(match.key->n_proto) == ETH_P_ALL) {
237 rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
238 rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
240 rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
241 rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
/* Remembered so the PORTS key below can insist on TCP/UDP. */
242 ip_proto = match.key->ip_proto;
245 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
246 struct flow_match_eth_addrs match;
248 flow_rule_match_eth_addrs(f_rule, &match);
/* MAC addresses are split across two hardware fields: first 4 bytes
 * then the remaining 2, for both DMAC and SMAC, key and mask alike.
 */
251 rule_match_set_n(r_match->key,
252 ETH_DMAC_0, &match.key->dst[0], 4);
253 rule_match_set_n(r_match->key,
254 ETH_DMAC_1, &match.key->dst[4], 2);
256 rule_match_set_n(r_match->mask,
257 ETH_DMAC_0, &match.mask->dst[0], 4);
258 rule_match_set_n(r_match->mask,
259 ETH_DMAC_1, &match.mask->dst[4], 2);
262 rule_match_set_n(r_match->key,
263 ETH_SMAC_0, &match.key->src[0], 4);
264 rule_match_set_n(r_match->key,
265 ETH_SMAC_1, &match.key->src[4], 2);
267 rule_match_set_n(r_match->mask,
268 ETH_SMAC_0, &match.mask->src[0], 4);
269 rule_match_set_n(r_match->mask,
270 ETH_SMAC_1, &match.mask->src[4], 2);
273 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
274 struct flow_match_ipv4_addrs match;
276 flow_rule_match_ipv4_addrs(f_rule, &match);
278 rule_match_set(r_match->key, IP_SRC, match.key->src);
279 rule_match_set(r_match->mask, IP_SRC, match.mask->src);
281 rule_match_set(r_match->key, IP_DST, match.key->dst);
282 rule_match_set(r_match->mask, IP_DST, match.mask->dst);
285 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
286 struct flow_match_ports match;
/* L4 port matching only makes sense for TCP/UDP. */
288 if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
291 "Only UDP and TCP keys are supported");
295 flow_rule_match_ports(f_rule, &match);
297 rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
298 rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
300 rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
301 rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
304 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
305 struct flow_match_vlan match;
307 flow_rule_match_vlan(f_rule, &match);
/* VLAN id of 0 in the mask means "don't match on the id". */
309 if (match.mask->vlan_id != 0) {
310 __be16 key = cpu_to_be16(match.key->vlan_id);
311 __be16 mask = cpu_to_be16(match.mask->vlan_id);
313 rule_match_set(r_match->key, VLAN_ID, key);
314 rule_match_set(r_match->mask, VLAN_ID, mask);
317 rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
318 rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
321 if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
322 struct flow_match_icmp match;
324 flow_rule_match_icmp(f_rule, &match);
326 rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
327 rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
329 rule_match_set(r_match->key, ICMP_CODE, match.key->code);
330 rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
/* Actions are parsed last, once the match key is fully assembled. */
333 return prestera_flower_parse_actions(block, rule, &f->rule->action,
334 f->common.chain_index,
/* FLOW_CLS_REPLACE entry point: create (or look up) the ruleset for the
 * rule's chain, build a rule from the TC request, offload the ruleset
 * if it is not yet in hardware, then add the rule.
 *
 * Reference discipline: prestera_acl_ruleset_get() takes one reference
 * here and prestera_acl_rule_create() takes its own (see comment
 * below), so the local reference is put on both the success path and
 * the unwind path.
 * NOTE(review): the error labels and return statements are partially
 * elided from this view.
 */
338 int prestera_flower_replace(struct prestera_flow_block *block,
339 struct flow_cls_offload *f)
341 struct prestera_acl_ruleset *ruleset;
342 struct prestera_acl *acl = block->sw->acl;
343 struct prestera_acl_rule *rule;
346 ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
348 return PTR_ERR(ruleset);
350 /* increments the ruleset reference */
351 rule = prestera_acl_rule_create(ruleset, f->cookie,
352 f->common.chain_index);
355 goto err_rule_create;
358 err = prestera_flower_parse(block, rule, f);
/* Lazily push the ruleset to hardware on first use. */
362 if (!prestera_acl_ruleset_is_offload(ruleset)) {
363 err = prestera_acl_ruleset_offload(ruleset);
365 goto err_ruleset_offload;
368 err = prestera_acl_rule_add(block->sw, rule);
372 prestera_acl_ruleset_put(ruleset);
377 prestera_acl_rule_destroy(rule);
379 prestera_acl_ruleset_put(ruleset);
/* FLOW_CLS_DESTROY entry point: find the chain's ruleset, look the rule
 * up by its TC cookie, remove it from hardware and free it, then drop
 * the ruleset reference taken by the lookup.
 * NOTE(review): the NULL/error checks after the lookups are elided
 * from this view.
 */
383 void prestera_flower_destroy(struct prestera_flow_block *block,
384 struct flow_cls_offload *f)
386 struct prestera_acl_ruleset *ruleset;
387 struct prestera_acl_rule *rule;
389 ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
390 f->common.chain_index);
394 rule = prestera_acl_rule_lookup(ruleset, f->cookie);
396 prestera_acl_rule_del(block->sw, rule);
397 prestera_acl_rule_destroy(rule);
399 prestera_acl_ruleset_put(ruleset);
/* FLOW_CLS_TMPLT_CREATE entry point: parse the template into a
 * throwaway on-stack rule just to derive the match keymask, stamp that
 * keymask onto the chain's ruleset, and keep a reference to the
 * ruleset on block->template_list so rules added later reuse it.
 *
 * The on-stack rule is zeroed first because the parser requires
 * re_arg/re_key to start from all-zero state.
 * NOTE(review): error labels, returns and the template kfree on the
 * unwind path are elided from this view.
 */
403 int prestera_flower_tmplt_create(struct prestera_flow_block *block,
404 struct flow_cls_offload *f)
406 struct prestera_flower_template *template;
407 struct prestera_acl_ruleset *ruleset;
408 struct prestera_acl_rule rule;
411 memset(&rule, 0, sizeof(rule));
412 err = prestera_flower_parse(block, &rule, f);
416 template = kmalloc(sizeof(*template), GFP_KERNEL);
/* pcl_id is zeroed in the keymask: the template constrains the match
 * fields only, not a particular PCL id.
 */
422 prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
423 ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
424 f->common.chain_index);
425 if (IS_ERR_OR_NULL(ruleset)) {
427 goto err_ruleset_get;
430 /* preserve keymask/template to this ruleset */
431 prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
433 /* skip error, as it is not possible to reject template operation,
434 * so, keep the reference to the ruleset for rules to be added
435 * to that ruleset later. In case of offload fail, the ruleset
436 * will be offloaded again during adding a new rule. Also, it is
437 * unlikely possible that the ruleset is already offloaded at this stage.
439 prestera_acl_ruleset_offload(ruleset);
441 /* keep the reference to the ruleset */
442 template->ruleset = ruleset;
443 template->chain_index = f->common.chain_index;
444 list_add_rcu(&template->list, &block->template_list);
450 NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
/* FLOW_CLS_TMPLT_DESTROY entry point: find the template matching the
 * request's chain index and free it (which also puts the ruleset
 * reference taken at create time). Safe iteration because the entry is
 * removed while walking the list.
 */
454 void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
455 struct flow_cls_offload *f)
457 struct prestera_flower_template *template, *tmp;
459 list_for_each_entry_safe(template, tmp, &block->template_list, list)
460 if (template->chain_index == f->common.chain_index) {
461 /* put the reference to the ruleset kept in create */
462 prestera_flower_template_free(template);
/* FLOW_CLS_STATS entry point: look the rule up by chain index and
 * cookie, read its hardware counters, and report them back to TC via
 * flow_stats_update() with the DELAYED stats type (matching the
 * counter setup in prestera_flower_parse_actions()).
 *
 * The ruleset reference taken by the lookup is put on every path
 * through the shared err_rule_get_stats/exit tail.
 * NOTE(review): the declarations of packets/bytes/lastuse and some
 * error checks are elided from this view.
 */
467 int prestera_flower_stats(struct prestera_flow_block *block,
468 struct flow_cls_offload *f)
470 struct prestera_acl_ruleset *ruleset;
471 struct prestera_acl_rule *rule;
477 ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
478 f->common.chain_index);
480 return PTR_ERR(ruleset);
482 rule = prestera_acl_rule_lookup(ruleset, f->cookie);
485 goto err_rule_get_stats;
488 err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
491 goto err_rule_get_stats;
493 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
494 FLOW_ACTION_HW_STATS_DELAYED);
497 prestera_acl_ruleset_put(ruleset);