/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"
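
/* Translate the flower rule's tcf_exts actions into mlxsw ACL rule actions.
 * A count action is always appended first (used later when reporting
 * statistics back to TC); gact drop, mirred egress redirect and the tc vlan
 * action are offloaded here, anything else is rejected.
 */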
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index);
			if (err)
				return err;

			out_dev = __dev_get_by_index(dev_net(dev), ifindex);
			if (out_dev == dev)
				out_dev = NULL;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio);
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
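
/* Copy the IPv4 source/destination address key and mask from the flower
 * dissector into the ACL rule info.
 */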
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
				       ntohl(key->src), ntohl(mask->src));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
				       ntohl(key->dst), ntohl(mask->dst));
}
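
/* IPv6 addresses do not fit a single key element, so each address is split
 * into an upper and a lower half (8 bytes each) and programmed as separate
 * _HI/_LO key elements.
 */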
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);
	size_t addr_half_size = sizeof(key->src) / 2;

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
				       &key->src.s6_addr[0],
				       &mask->src.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
				       &key->src.s6_addr[addr_half_size],
				       &mask->src.s6_addr[addr_half_size],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
				       &key->dst.s6_addr[0],
				       &mask->dst.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
				       &key->dst.s6_addr[addr_half_size],
				       &mask->dst.s6_addr[addr_half_size],
				       addr_half_size);
}
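
/* L4 source/destination port keys. These are only valid together with an
 * explicit TCP or UDP ip_proto match, otherwise the request is rejected.
 */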
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}
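
/* TCP flags key, valid only when the rule also matches on ip_proto TCP. */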
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}
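
/* Main match parser: reject any dissector key this driver cannot offload,
 * then translate the supported flower keys (ethertype, IP protocol, MAC
 * addresses, VLAN, IPv4/IPv6 addresses, L4 ports, TCP flags) into ACL key
 * elements and finally parse the actions.
 */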
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		u16 n_proto_key = ntohs(key->n_proto);
		u16 n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
}
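
/* Entry point for a flower "replace" (install) request on a port: get the
 * port's flower ruleset, create a rule keyed by the TC cookie, fill it from
 * the flower match/actions and add it to the ruleset.
 */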
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
	if (err)
		goto err_flower_parse;
	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;
	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
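
/* Remove and free the offloaded rule matching the TC cookie, if any. */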
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
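
/* Read the offloaded rule's counters and push them into the TC actions via
 * tcf_action_stats_update() so that TC dumps include offloaded traffic.
 */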
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets, bytes, lastuse;
	struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}