// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"
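
/* Translate the tc actions attached to a flower classifier into mlxsw
 * ACL rule actions. A count action is appended first so that rule
 * statistics can later be read back via mlxsw_sp_flower_stats().
 */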
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio, extack);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
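
/* The flexible-key elements carry IP addresses in 32-bit chunks, so the
 * dissector keys and masks below are copied into the rule a word at a time.
 */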
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}
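
/* L4 ports are only meaningful for TCP/UDP, so the caller's ip_proto is
 * checked before the port key/mask is added to the rule.
 */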
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}
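
/* TTL, ECN (low two TOS bits) and DSCP (upper six TOS bits) are taken
 * from the IP dissector key and programmed as separate key elements.
 */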
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}
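
/* Top-level match parser: reject dissector keys the Spectrum ACL cannot
 * express, then translate each supported key into flexible-key elements
 * and finish by parsing the attached actions.
 */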
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}
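
/* Entry points called from the spectrum tc flower offload hooks:
 * replace creates and installs a rule keyed by the flower cookie,
 * destroy removes it, and stats feeds hardware counters back to tc.
 */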
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}
	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;
	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets, bytes, lastuse;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;
	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;
	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
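
/* Chain templates: parse the template match (no actions are present) to
 * learn which key elements are used, then take and keep a ruleset
 * reference sized for that usage until the template is destroyed.
 */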
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference taken by the get above and the one kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}