net/mlx5e: Add neighbour hash table to the representors
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

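/* NIC (non-eswitch) offload path: build the rule in the per-port TC flow
 * table. A flow counter and/or a modify-header context are allocated as
 * the parsed actions require, and the auto-grouped TC table is created
 * lazily on first use. On failure, steps are unwound in reverse order.
 */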
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

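/* Eswitch (FDB) offload path: push the rule into the offloads FDB.
 * The VLAN action and modify-header context are set up first; on any
 * failure the steps already taken are unwound in reverse order, and an
 * attached encap entry is released as well.
 */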
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED)
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

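/* Tunnel match parsing. parse_vxlan_attr() adds the UDP protocol and
 * VXLAN VNI match; parse_tunnel_attr() adds the outer L3/L4 tunnel
 * match and only accepts VXLAN with a fully-masked UDP destination
 * port that is known to the VXLAN port table of the uplink device.
 */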
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

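/* Translate a flower dissector match into an mlx5 flow spec. Also
 * reports the minimal eswitch inline mode the match requires (L2, IP
 * or TCP/UDP) so parse_cls_flower() can validate it against the
 * configured inline mode for non-uplink vports.
 */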
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flows, header pointers should point to the inner
		 * headers; outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

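/* Header rewrite (pedit) offload. SW pedit keys are first accumulated
 * into per-command (set/add) mask/value copies of the supported header
 * layouts, then translated into mlx5 modify-header actions, one HW
 * action per fully-masked field. A rule along these lines (device
 * names hypothetical) exercises this path:
 *   tc filter add dev eth0 parent ffff: protocol ip flower ip_proto tcp \
 *     action pedit ex munge ip ttl set 63 pipe \
 *     action mirred egress redirect dev eth1
 */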
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

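/* Mapping between mlx5 modify-header field IDs and the offset/size of
 * the corresponding field within the pedit_headers layout above.
 */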
static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can be
 * parsed at most from the SW pedit action. On success, it says how many HW
 * actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last;
	void *s_masks_p, *a_masks_p, *vals_p;
	u32 s_mask, a_mask, val;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = a_mask = mask = val = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		memcpy(&val, vals_p, f->size);

		field_bsize = f->size * BITS_PER_BYTE;
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1)) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(val));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(val));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, val);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

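/* Size the modify-header action buffer: capped both by the device
 * capability (FDB or NIC RX table, depending on namespace) and by the
 * worst case of 16 HW actions per 32-bit pedit SW key.
 */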
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can translate to up to 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

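/* Parse TC actions for the NIC offload path. Only a single action per
 * rule is accepted: drop (with a counter when the device supports
 * flow counters), header rewrite, or skbedit mark.
 */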
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

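/* Encap entries are shared between flows: entries are keyed by the
 * full ip_tunnel_key and kept in the eswitch offloads encap hash
 * table, so flows to the same tunnel destination reuse one encap_id.
 */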
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

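/* Build the VXLAN encap header (outer Ethernet + IPv4/IPv6 + UDP +
 * VXLAN) into buf. Only the fields set below are filled; the rest of
 * the buffer stays zeroed, e.g. the IPv6 payload length, which the HW
 * fills in at transmit time.
 */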
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

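/* Resolve the route and neighbour for the tunnel destination, build
 * the encap header, and allocate a HW encap_id for it. Fails with
 * -EOPNOTSUPP if the neighbour entry is not (yet) valid, since the
 * destination MAC is needed to complete the header.
 */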
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	if (!(nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = out_dev;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	if (!(nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = out_dev;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

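/* Attach a flow to an encap entry: reuse a cached entry with the same
 * tunnel key when one exists in the eswitch encap table, otherwise
 * create the encap header (IPv4 or IPv6) and add the new entry to the
 * table. The flow is linked onto the entry's flow list either way.
 */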
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	attr->encap_id = e->encap_id;

	return 0;

out_err:
	kfree(e);
	return err;
}

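/* Parse TC actions for the eswitch (FDB) path: drop, header rewrite,
 * mirred redirect (to a rep on the same eswitch, or through an encap
 * tunnel prepared by a preceding tunnel_key set), vlan push/pop and
 * tunnel release (decap).
 */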
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

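/* Flower offload entry points: add, delete and query stats for a rule,
 * keyed by its TC cookie in the flow hash table. A rule such as the
 * following (hypothetical interface name) would land here via
 * ndo_setup_tc:
 *   tc filter add dev eth0 parent ffff: protocol ip flower skip_sw \
 *      ip_proto tcp dst_port 80 action drop
 */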
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(parse_attr);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

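/* Offloaded flows are tracked in an rhashtable keyed by the TC flow
 * cookie; cleanup walks the table and tears down every remaining flow.
 */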
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}