net/mlx5e: For TC offloads, always add new flow instead of appending the actions
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_NIC	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};

#define MLX5E_TC_MAX_SPLITS 1

struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

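/* Modify-header (pedit) contexts are de-duplicated: flows whose packed
 * action lists are byte-identical share one firmware mod_hdr ID, keyed
 * by a jhash over the action array.
 */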
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

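/* Find or allocate the mod_hdr entry matching this flow's pedit actions
 * in the per-namespace hash table (FDB for eswitch flows, kernel NIC
 * namespace otherwise) and link the flow onto the entry's flow list.
 */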
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

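/* With more than one hairpin channel, receive-side scaling is used: the
 * RQT below spreads hairpinned traffic over the pair's RQs, mirroring
 * the netdev's default indirection layout.
 */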
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

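/* Attach a NIC flow to a hairpin pair, creating the pair on first use.
 * Pairs are shared per (peer vhca id, matched PCP prio), and the number
 * of channels is sized at one per 50Gbps share of the port link speed.
 */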
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex;
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set a hairpin pair per each 50Gbps share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

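/* Offload a NIC (non-eswitch) flow. FLOW_ACT_NO_APPEND tells fs_core to
 * always add a new rule rather than appending its actions to an
 * existing rule with the same match value.
 */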
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
		.flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err) {
			goto err_add_hairpin_flow;
		}
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_create_mod_hdr_id;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			err = PTR_ERR(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);

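/* Offload an eswitch (FDB) flow. When the encap destination has no
 * valid neighbour yet, mlx5e_attach_encap() returns -EAGAIN; the flow
 * is kept cached on the encap entry and offloaded later, from
 * mlx5e_tc_encap_flows_add(), once the neighbour resolves.
 */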
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err = 0, encap_err = 0;

	/* keep the old behaviour, use same prio for all offloaded rules */
	attr->prio = 1;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					       out_dev, &encap_dev, flow,
					       extack);
		if (encap_err && encap_err != -EAGAIN) {
			err = encap_err;
			goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep[attr->out_count] = rpriv->rep;
		attr->out_mdev[attr->out_count++] = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_add_vlan;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_mod_hdr;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_create_counter;
		}

		attr->counter = counter;
	}

	/* we get here if (1) there's no error or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (encap_err != -EAGAIN) {
		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
		if (IS_ERR(flow->rule[0])) {
			err = PTR_ERR(flow->rule[0]);
			goto err_add_rule;
		}

		if (attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
			if (IS_ERR(flow->rule[1])) {
				err = PTR_ERR(flow->rule[1]);
				goto err_fwd_rule;
			}
		}
	}

	return encap_err;

err_fwd_rule:
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_create_counter:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
	return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		if (attr->mirror_count)
			mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw->dev, attr->counter);
}

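/* Called when a cached encap entry's neighbour becomes valid: allocate
 * the packet reformat context and offload every flow waiting on it.
 */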
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
		if (IS_ERR(flow->rule[0])) {
			err = PTR_ERR(flow->rule[0]);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		if (esw_attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
			if (IS_ERR(flow->rule[1])) {
				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
				err = PTR_ERR(flow->rule[1]);
				mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
					       err);
				continue;
			}
		}

		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			struct mlx5_esw_flow_attr *attr = flow->esw_attr;

			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			if (attr->mirror_count)
				mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
	}
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

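/* Offloaded traffic never hits the host stack, so neighbour entries
 * used by encap flows would otherwise go stale; if any offloaded flow
 * on this entry saw traffic since the last report, send a neigh event
 * to keep the entry reachable.
 */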
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5e_tc_get_counter(flow);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

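/* Match parsing: translate TC flower dissector keys into the device's
 * fte_match_param layout. VXLAN is the only tunnel type supported for
 * decap offload.
 */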
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

1133
1134static int parse_tunnel_attr(struct mlx5e_priv *priv,
1135 struct mlx5_flow_spec *spec,
1136 struct tc_cls_flower_offload *f)
1137{
e98bedf5 1138 struct netlink_ext_ack *extack = f->common.extack;
bbd00f7e
HHZ
1139 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1140 outer_headers);
1141 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1142 outer_headers);
1143
2e72eb43
OG
1144 struct flow_dissector_key_control *enc_control =
1145 skb_flow_dissector_target(f->dissector,
1146 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1147 f->key);
1148
bbd00f7e
HHZ
1149 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
1150 struct flow_dissector_key_ports *key =
1151 skb_flow_dissector_target(f->dissector,
1152 FLOW_DISSECTOR_KEY_ENC_PORTS,
1153 f->key);
1154 struct flow_dissector_key_ports *mask =
1155 skb_flow_dissector_target(f->dissector,
1156 FLOW_DISSECTOR_KEY_ENC_PORTS,
1157 f->mask);
1158
1159 /* Full udp dst port must be given */
1160 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
2fcd82e9 1161 goto vxlan_match_offload_err;
bbd00f7e 1162
a3e67366 1163 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
bbd00f7e
HHZ
1164 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
1165 parse_vxlan_attr(spec, f);
2fcd82e9 1166 else {
e98bedf5
EB
1167 NL_SET_ERR_MSG_MOD(extack,
1168 "port isn't an offloaded vxlan udp dport");
2fcd82e9
OG
1169 netdev_warn(priv->netdev,
1170 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
bbd00f7e 1171 return -EOPNOTSUPP;
2fcd82e9 1172 }
bbd00f7e
HHZ
1173
1174 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1175 udp_dport, ntohs(mask->dst));
1176 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1177 udp_dport, ntohs(key->dst));
1178
cd377663
OG
1179 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1180 udp_sport, ntohs(mask->src));
1181 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1182 udp_sport, ntohs(key->src));
bbd00f7e 1183 } else { /* udp dst port must be given */
2fcd82e9 1184vxlan_match_offload_err:
e98bedf5
EB
1185 NL_SET_ERR_MSG_MOD(extack,
1186 "IP tunnel decap offload supported only for vxlan, must set UDP dport");
2fcd82e9
OG
1187 netdev_warn(priv->netdev,
1188 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
1189 return -EOPNOTSUPP;
bbd00f7e
HHZ
1190 }
1191
2e72eb43 1192 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
bbd00f7e
HHZ
1193 struct flow_dissector_key_ipv4_addrs *key =
1194 skb_flow_dissector_target(f->dissector,
1195 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1196 f->key);
1197 struct flow_dissector_key_ipv4_addrs *mask =
1198 skb_flow_dissector_target(f->dissector,
1199 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1200 f->mask);
1201 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1202 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1203 ntohl(mask->src));
1204 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1205 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1206 ntohl(key->src));
1207
1208 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1209 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1210 ntohl(mask->dst));
1211 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1212 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1213 ntohl(key->dst));
bbd00f7e 1214
2e72eb43
OG
1215 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1216 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
19f44401
OG
1217 } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1218 struct flow_dissector_key_ipv6_addrs *key =
1219 skb_flow_dissector_target(f->dissector,
1220 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1221 f->key);
1222 struct flow_dissector_key_ipv6_addrs *mask =
1223 skb_flow_dissector_target(f->dissector,
1224 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1225 f->mask);
1226
1227 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1228 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1229 &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1230 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1231 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1232 &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1233
1234 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1235 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1236 &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1237 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1238 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1239 &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1240
1241 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1242 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
2e72eb43 1243 }
bbd00f7e 1244
bcef735c
OG
1245 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
1246 struct flow_dissector_key_ip *key =
1247 skb_flow_dissector_target(f->dissector,
1248 FLOW_DISSECTOR_KEY_ENC_IP,
1249 f->key);
1250 struct flow_dissector_key_ip *mask =
1251 skb_flow_dissector_target(f->dissector,
1252 FLOW_DISSECTOR_KEY_ENC_IP,
1253 f->mask);
1254
1255 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1256 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1257
1258 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1259 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
1260
1261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1262 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
e98bedf5
EB
1263
1264 if (mask->ttl &&
1265 !MLX5_CAP_ESW_FLOWTABLE_FDB
1266 (priv->mdev,
1267 ft_field_support.outer_ipv4_ttl)) {
1268 NL_SET_ERR_MSG_MOD(extack,
1269 "Matching on TTL is not supported");
1270 return -EOPNOTSUPP;
1271 }
1272
bcef735c
OG
1273 }
1274
bbd00f7e
HHZ
1275 /* Enforce DMAC when offloading incoming tunneled flows.
1276 * Flow counters require a match on the DMAC.
1277 */
1278 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1279 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1280 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1281 dmac_47_16), priv->netdev->dev_addr);
1282
1283 /* let software handle IP fragments */
1284 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1285 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
1286
1287 return 0;
1288}
1289
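/* Parse the flower match into @spec and report the deepest header layer
 * matched on via @match_level (NONE/L2/L3/L4); the caller uses it to
 * validate the eswitch minimum-inline configuration.
 */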
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);

		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		if (mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_INLINE_MODE_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (mask->tos || mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}

de0af0bf 1682static int parse_cls_flower(struct mlx5e_priv *priv,
65ba8fb7 1683 struct mlx5e_tc_flow *flow,
de0af0bf
RD
1684 struct mlx5_flow_spec *spec,
1685 struct tc_cls_flower_offload *f)
1686{
e98bedf5 1687 struct netlink_ext_ack *extack = f->common.extack;
de0af0bf
RD
1688 struct mlx5_core_dev *dev = priv->mdev;
1689 struct mlx5_eswitch *esw = dev->priv.eswitch;
1d447a39
SM
1690 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1691 struct mlx5_eswitch_rep *rep;
d708f902 1692 u8 match_level;
de0af0bf
RD
1693 int err;
1694
d708f902 1695 err = __parse_cls_flower(priv, spec, f, &match_level);
de0af0bf 1696
1d447a39
SM
1697 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1698 rep = rpriv->rep;
1699 if (rep->vport != FDB_UPLINK_VPORT &&
1700 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
d708f902 1701 esw->offloads.inline_mode < match_level)) {
e98bedf5
EB
1702 NL_SET_ERR_MSG_MOD(extack,
1703 "Flow is not offloaded due to min inline setting");
de0af0bf
RD
1704 netdev_warn(priv->netdev,
1705 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
d708f902 1706 match_level, esw->offloads.inline_mode);
de0af0bf
RD
1707 return -EOPNOTSUPP;
1708 }
1709 }
1710
38aa51c1
OG
1711 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1712 flow->esw_attr->match_level = match_level;
1713 else
1714 flow->nic_attr->match_level = match_level;
1715
de0af0bf
RD
1716 return err;
1717}
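/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * min-inline check performed in parse_cls_flower() above: a flow is
 * only offloadable when the e-switch inline mode copies at least as
 * many header bytes into the WQE as the deepest layer the flow
 * matches on. The enum below stands in for the kernel's
 * MLX5_INLINE_MODE_* and MLX5_MATCH_* constants.
 */
#include <stdbool.h>
#include <stdio.h>

enum level { LVL_NONE, LVL_L2, LVL_L3, LVL_L4 };

static bool inline_mode_ok(enum level inline_mode, enum level match_level,
			   bool is_uplink)
{
	/* the uplink vport is exempt from the inline requirement */
	if (is_uplink)
		return true;
	/* inline mode NONE means the NIC parses headers by itself */
	if (inline_mode == LVL_NONE)
		return true;
	return inline_mode >= match_level;
}

int main(void)
{
	printf("%d\n", inline_mode_ok(LVL_L2, LVL_L4, false)); /* 0: match too deep */
	printf("%d\n", inline_mode_ok(LVL_L4, LVL_L3, false)); /* 1: offloadable */
	return 0;
}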
1718
d79b6df6
OG
1719struct pedit_headers {
1720 struct ethhdr eth;
1721 struct iphdr ip4;
1722 struct ipv6hdr ip6;
1723 struct tcphdr tcp;
1724 struct udphdr udp;
1725};
1726
1727static int pedit_header_offsets[] = {
1728 [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1729 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1730 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1731 [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1732 [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1733};
1734
1735#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1736
1737static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1738 struct pedit_headers *masks,
1739 struct pedit_headers *vals)
1740{
1741 u32 *curr_pmask, *curr_pval;
1742
1743 if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1744 goto out_err;
1745
1746 curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1747 curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
1748
1749 if (*curr_pmask & mask) /* disallow acting twice on the same location */
1750 goto out_err;
1751
1752 *curr_pmask |= mask;
1753 *curr_pval |= (val & mask);
1754
1755 return 0;
1756
1757out_err:
1758 return -EOPNOTSUPP;
1759}
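/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * pedit_header() dispatch above: one flat struct holds a shadow copy
 * of every rewritable header, a table of offsetof() values maps the
 * TC header type to the right member, and each pedit key accumulates
 * its (mask, val) words at header base + key offset, rejecting a
 * second write to bits that were already claimed. All names and the
 * trimmed-down header structs are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum hdr_type { HDR_ETH, HDR_IP4, HDR_MAX };

struct eth_h { uint8_t dst[6], src[6]; uint16_t proto; };
struct ip4_h { uint8_t vhl, tos; uint16_t tot_len; uint32_t addrs[4]; };

struct hdrs { struct eth_h eth; struct ip4_h ip4; };

static const size_t hdr_off[HDR_MAX] = {
	[HDR_ETH] = offsetof(struct hdrs, eth),
	[HDR_IP4] = offsetof(struct hdrs, ip4),
};

static int set_val(struct hdrs *masks, struct hdrs *vals, enum hdr_type t,
		   uint32_t mask, uint32_t val, uint32_t offset)
{
	uint32_t *pmask = (uint32_t *)((char *)masks + hdr_off[t] + offset);
	uint32_t *pval = (uint32_t *)((char *)vals + hdr_off[t] + offset);

	if (*pmask & mask) /* disallow acting twice on the same bits */
		return -1;
	*pmask |= mask;
	*pval |= val & mask;
	return 0;
}

int main(void)
{
	static struct hdrs masks, vals; /* static: zero initialized */

	/* first write to eth.dst succeeds, an overlapping one fails */
	printf("%d\n", set_val(&masks, &vals, HDR_ETH, 0xffffffff, 0x01020304, 0));
	printf("%d\n", set_val(&masks, &vals, HDR_ETH, 0x0000ff00, 0x0000aa00, 0));
	return 0;
}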
1760
1761struct mlx5_fields {
1762 u8 field;
1763 u8 size;
1764 u32 offset;
1765};
1766
a8e4f0c4
OG
1767#define OFFLOAD(fw_field, size, field, off) \
1768 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1769
d79b6df6 1770static struct mlx5_fields fields[] = {
a8e4f0c4
OG
1771 OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1772 OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
1773 OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1774 OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
1775 OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
1776
1777 OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
1778 OFFLOAD(SIPV4, 4, ip4.saddr, 0),
1779 OFFLOAD(DIPV4, 4, ip4.daddr, 0),
1780
1781 OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1782 OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
1783 OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
1784 OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
1785 OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1786 OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
1787 OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
1788 OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
0c0316f5 1789 OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
a8e4f0c4
OG
1790
1791 OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
1792 OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
1793 OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1794
1795 OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1796 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
d79b6df6
OG
1797};
1798
1799/* On input, parse_attr->num_mod_hdr_actions holds the maximum number of
1800 * HW actions that can be parsed from the SW pedit action. On success, it
1801 * holds the number of HW actions that were actually parsed.
1802 */
1803static int offload_pedit_fields(struct pedit_headers *masks,
1804 struct pedit_headers *vals,
e98bedf5
EB
1805 struct mlx5e_tc_flow_parse_attr *parse_attr,
1806 struct netlink_ext_ack *extack)
d79b6df6
OG
1807{
1808 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2b64beba 1809 int i, action_size, nactions, max_actions, first, last, next_z;
d79b6df6 1810 void *s_masks_p, *a_masks_p, *vals_p;
d79b6df6
OG
1811 struct mlx5_fields *f;
1812 u8 cmd, field_bsize;
e3ca4e05 1813 u32 s_mask, a_mask;
d79b6df6 1814 unsigned long mask;
2b64beba
OG
1815 __be32 mask_be32;
1816 __be16 mask_be16;
d79b6df6
OG
1817 void *action;
1818
1819 set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1820 add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1821 set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1822 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1823
1824 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1825 action = parse_attr->mod_hdr_actions;
1826 max_actions = parse_attr->num_mod_hdr_actions;
1827 nactions = 0;
1828
1829 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1830 f = &fields[i];
1831 /* avoid seeing bits set from previous iterations */
e3ca4e05
OG
1832 s_mask = 0;
1833 a_mask = 0;
d79b6df6
OG
1834
1835 s_masks_p = (void *)set_masks + f->offset;
1836 a_masks_p = (void *)add_masks + f->offset;
1837
1838 memcpy(&s_mask, s_masks_p, f->size);
1839 memcpy(&a_mask, a_masks_p, f->size);
1840
1841 if (!s_mask && !a_mask) /* nothing to offload here */
1842 continue;
1843
1844 if (s_mask && a_mask) {
e98bedf5
EB
1845 NL_SET_ERR_MSG_MOD(extack,
1846 "can't set and add to the same HW field");
d79b6df6
OG
1847 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1848 return -EOPNOTSUPP;
1849 }
1850
1851 if (nactions == max_actions) {
e98bedf5
EB
1852 NL_SET_ERR_MSG_MOD(extack,
1853 "too many pedit actions, can't offload");
d79b6df6
OG
1854 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1855 return -EOPNOTSUPP;
1856 }
1857
1858 if (s_mask) {
1859 cmd = MLX5_ACTION_TYPE_SET;
1860 mask = s_mask;
1861 vals_p = (void *)set_vals + f->offset;
1862 /* clear to denote we consumed this field */
1863 memset(s_masks_p, 0, f->size);
1864 } else {
1865 cmd = MLX5_ACTION_TYPE_ADD;
1866 mask = a_mask;
1867 vals_p = (void *)add_vals + f->offset;
1868 /* clear to denote we consumed this field */
1869 memset(a_masks_p, 0, f->size);
1870 }
1871
d79b6df6 1872 field_bsize = f->size * BITS_PER_BYTE;
e3ca4e05 1873
2b64beba
OG
1874 if (field_bsize == 32) {
1875 mask_be32 = *(__be32 *)&mask;
1876 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1877 } else if (field_bsize == 16) {
1878 mask_be16 = *(__be16 *)&mask;
1879 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1880 }
1881
d79b6df6 1882 first = find_first_bit(&mask, field_bsize);
2b64beba 1883 next_z = find_next_zero_bit(&mask, field_bsize, first);
d79b6df6 1884 last = find_last_bit(&mask, field_bsize);
2b64beba 1885 if (first < next_z && next_z < last) {
e98bedf5
EB
1886 NL_SET_ERR_MSG_MOD(extack,
1887 "rewrite of few sub-fields isn't supported");
2b64beba 1888 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
d79b6df6
OG
1889 mask);
1890 return -EOPNOTSUPP;
1891 }
1892
1893 MLX5_SET(set_action_in, action, action_type, cmd);
1894 MLX5_SET(set_action_in, action, field, f->field);
1895
1896 if (cmd == MLX5_ACTION_TYPE_SET) {
2b64beba 1897 MLX5_SET(set_action_in, action, offset, first);
d79b6df6 1898 /* length is num of bits to be written, zero means length of 32 */
2b64beba 1899 MLX5_SET(set_action_in, action, length, (last - first + 1));
d79b6df6
OG
1900 }
1901
1902 if (field_bsize == 32)
2b64beba 1903 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
d79b6df6 1904 else if (field_bsize == 16)
2b64beba 1905 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
d79b6df6 1906 else if (field_bsize == 8)
2b64beba 1907 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
d79b6df6
OG
1908
1909 action += action_size;
1910 nactions++;
1911 }
1912
1913 parse_attr->num_mod_hdr_actions = nactions;
1914 return 0;
1915}
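/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * contiguous-mask test in offload_pedit_fields() above: a pedit mask
 * can be translated to a single HW modify-header action only if its
 * set bits form one contiguous run, because the action is described
 * by a single (offset, length) pair. find_first_bit(),
 * find_next_zero_bit() and find_last_bit() are modeled with plain
 * loops over a 32-bit word.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBITS 32

static int first_set(unsigned long m)
{
	for (int i = 0; i < NBITS; i++)
		if (m & (1UL << i))
			return i;
	return NBITS;
}

static int next_zero(unsigned long m, int from)
{
	for (int i = from; i < NBITS; i++)
		if (!(m & (1UL << i)))
			return i;
	return NBITS;
}

static int last_set(unsigned long m)
{
	for (int i = NBITS - 1; i >= 0; i--)
		if (m & (1UL << i))
			return i;
	return -1;
}

static bool mask_offloadable(unsigned long mask)
{
	int first = first_set(mask);
	int next_z = next_zero(mask, first);
	int last = last_set(mask);

	/* a zero between two set runs means disjoint sub-fields: reject */
	return !(first < next_z && next_z < last);
}

int main(void)
{
	printf("%d\n", mask_offloadable(0x00ffff00)); /* 1: one run */
	printf("%d\n", mask_offloadable(0x00ff00ff)); /* 0: two runs */
	return 0;
}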
1916
1917static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1918 const struct tc_action *a, int namespace,
1919 struct mlx5e_tc_flow_parse_attr *parse_attr)
1920{
1921 int nkeys, action_size, max_actions;
1922
1923 nkeys = tcf_pedit_nkeys(a);
1924 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1925
1926 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1927 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1928 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1929 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
1930
1931 /* a single 32-bit pedit SW key can yield up to 16 HW actions */
1932 max_actions = min(max_actions, nkeys * 16);
1933
1934 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1935 if (!parse_attr->mod_hdr_actions)
1936 return -ENOMEM;
1937
1938 parse_attr->num_mod_hdr_actions = max_actions;
1939 return 0;
1940}
1941
1942static const struct pedit_headers zero_masks = {};
1943
1944static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1945 const struct tc_action *a, int namespace,
e98bedf5
EB
1946 struct mlx5e_tc_flow_parse_attr *parse_attr,
1947 struct netlink_ext_ack *extack)
d79b6df6
OG
1948{
1949 struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1950 int nkeys, i, err = -EOPNOTSUPP;
1951 u32 mask, val, offset;
1952 u8 cmd, htype;
1953
1954 nkeys = tcf_pedit_nkeys(a);
1955
1956 memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1957 memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1958
1959 for (i = 0; i < nkeys; i++) {
1960 htype = tcf_pedit_htype(a, i);
1961 cmd = tcf_pedit_cmd(a, i);
1962 err = -EOPNOTSUPP; /* can't be all optimistic */
1963
1964 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
e98bedf5
EB
1965 NL_SET_ERR_MSG_MOD(extack,
1966 "legacy pedit isn't offloaded");
d79b6df6
OG
1967 goto out_err;
1968 }
1969
1970 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
e98bedf5 1971 NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
d79b6df6
OG
1972 goto out_err;
1973 }
1974
1975 mask = tcf_pedit_mask(a, i);
1976 val = tcf_pedit_val(a, i);
1977 offset = tcf_pedit_offset(a, i);
1978
1979 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
1980 if (err)
1981 goto out_err;
1982 }
1983
1984 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1985 if (err)
1986 goto out_err;
1987
e98bedf5 1988 err = offload_pedit_fields(masks, vals, parse_attr, extack);
d79b6df6
OG
1989 if (err < 0)
1990 goto out_dealloc_parsed_actions;
1991
1992 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
1993 cmd_masks = &masks[cmd];
1994 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
e98bedf5
EB
1995 NL_SET_ERR_MSG_MOD(extack,
1996 "attempt to offload an unsupported field");
b3a433de 1997 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
d79b6df6
OG
1998 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
1999 16, 1, cmd_masks, sizeof(zero_masks), true);
2000 err = -EOPNOTSUPP;
2001 goto out_dealloc_parsed_actions;
2002 }
2003 }
2004
2005 return 0;
2006
2007out_dealloc_parsed_actions:
2008 kfree(parse_attr->mod_hdr_actions);
2009out_err:
2010 return err;
2011}
2012
e98bedf5
EB
2013static bool csum_offload_supported(struct mlx5e_priv *priv,
2014 u32 action,
2015 u32 update_flags,
2016 struct netlink_ext_ack *extack)
26c02749
OG
2017{
2018 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2019 TCA_CSUM_UPDATE_FLAG_UDP;
2020
2021 /* The HW recalcs checksums only if re-writing headers */
2022 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
e98bedf5
EB
2023 NL_SET_ERR_MSG_MOD(extack,
2024 "TC csum action is only offloaded with pedit");
26c02749
OG
2025 netdev_warn(priv->netdev,
2026 "TC csum action is only offloaded with pedit\n");
2027 return false;
2028 }
2029
2030 if (update_flags & ~prot_flags) {
e98bedf5
EB
2031 NL_SET_ERR_MSG_MOD(extack,
2032 "can't offload TC csum action for some header/s");
26c02749
OG
2033 netdev_warn(priv->netdev,
2034 "can't offload TC csum action for some header/s - flags %#x\n",
2035 update_flags);
2036 return false;
2037 }
2038
2039 return true;
2040}
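/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * subset test in csum_offload_supported() above: an update_flags word
 * is offloadable only if every set bit falls inside the supported
 * protocol mask, which the classic "flags & ~allowed" idiom checks in
 * one step. The flag values are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_IPV4HDR 0x1
#define F_TCP	  0x2
#define F_UDP	  0x4
#define F_SCTP	  0x8

static bool csum_flags_supported(uint32_t update_flags)
{
	const uint32_t allowed = F_IPV4HDR | F_TCP | F_UDP;

	/* any bit outside the allowed set rejects the whole action */
	return !(update_flags & ~allowed);
}

int main(void)
{
	printf("%d\n", csum_flags_supported(F_IPV4HDR | F_TCP)); /* 1 */
	printf("%d\n", csum_flags_supported(F_TCP | F_SCTP));	  /* 0 */
	return 0;
}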
2041
bdd66ac0 2042static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
e98bedf5
EB
2043 struct tcf_exts *exts,
2044 struct netlink_ext_ack *extack)
bdd66ac0
OG
2045{
2046 const struct tc_action *a;
2047 bool modify_ip_header;
2048 LIST_HEAD(actions);
2049 u8 htype, ip_proto;
2050 void *headers_v;
2051 u16 ethertype;
2052 int nkeys, i;
2053
2054 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2055 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2056
2057 /* for non-IP we only re-write MACs, so we're okay */
2058 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2059 goto out_ok;
2060
2061 modify_ip_header = false;
244cd96a
CW
2062 tcf_exts_for_each_action(i, a, exts) {
2063 int k;
2064
bdd66ac0
OG
2065 if (!is_tcf_pedit(a))
2066 continue;
2067
2068 nkeys = tcf_pedit_nkeys(a);
244cd96a
CW
2069 for (k = 0; k < nkeys; k++) {
2070 htype = tcf_pedit_htype(a, k);
bdd66ac0
OG
2071 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
2072 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
2073 modify_ip_header = true;
2074 break;
2075 }
2076 }
2077 }
2078
2079 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1ccef350
JL
2080 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
2081 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
e98bedf5
EB
2082 NL_SET_ERR_MSG_MOD(extack,
2083 "can't offload re-write of non TCP/UDP");
bdd66ac0
OG
2084 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2085 return false;
2086 }
2087
2088out_ok:
2089 return true;
2090}
2091
2092static bool actions_match_supported(struct mlx5e_priv *priv,
2093 struct tcf_exts *exts,
2094 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2095 struct mlx5e_tc_flow *flow,
2096 struct netlink_ext_ack *extack)
bdd66ac0
OG
2097{
2098 u32 actions;
2099
2100 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
2101 actions = flow->esw_attr->action;
2102 else
2103 actions = flow->nic_attr->action;
2104
7e29392e
RD
2105 if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
2106 !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
2107 return false;
2108
bdd66ac0 2109 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
e98bedf5
EB
2110 return modify_header_match_supported(&parse_attr->spec, exts,
2111 extack);
bdd66ac0
OG
2112
2113 return true;
2114}
2115
5c65c564
OG
2116static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
2117{
2118 struct mlx5_core_dev *fmdev, *pmdev;
816f6706 2119 u64 fsystem_guid, psystem_guid;
5c65c564
OG
2120
2121 fmdev = priv->mdev;
2122 pmdev = peer_priv->mdev;
2123
59c9d35e
AH
2124 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
2125 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
5c65c564 2126
816f6706 2127 return (fsystem_guid == psystem_guid);
5c65c564
OG
2128}
2129
5c40348c 2130static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
aa0cbbae 2131 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2132 struct mlx5e_tc_flow *flow,
2133 struct netlink_ext_ack *extack)
e3a2b7ed 2134{
aa0cbbae 2135 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
e3a2b7ed 2136 const struct tc_action *a;
22dc13c8 2137 LIST_HEAD(actions);
1cab1cd7 2138 u32 action = 0;
244cd96a 2139 int err, i;
e3a2b7ed 2140
3bcc0cec 2141 if (!tcf_exts_has_actions(exts))
e3a2b7ed
AV
2142 return -EINVAL;
2143
3bc4b7bf 2144 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
e3a2b7ed 2145
244cd96a 2146 tcf_exts_for_each_action(i, a, exts) {
e3a2b7ed 2147 if (is_tcf_gact_shot(a)) {
1cab1cd7 2148 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
aad7e08d
AV
2149 if (MLX5_CAP_FLOWTABLE(priv->mdev,
2150 flow_table_properties_nic_receive.flow_counter))
1cab1cd7 2151 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
e3a2b7ed
AV
2152 continue;
2153 }
2154
2f4fe4ca
OG
2155 if (is_tcf_pedit(a)) {
2156 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
e98bedf5 2157 parse_attr, extack);
2f4fe4ca
OG
2158 if (err)
2159 return err;
2160
1cab1cd7
OG
2161 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
2162 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2f4fe4ca
OG
2163 continue;
2164 }
2165
26c02749 2166 if (is_tcf_csum(a)) {
1cab1cd7 2167 if (csum_offload_supported(priv, action,
e98bedf5
EB
2168 tcf_csum_update_flags(a),
2169 extack))
26c02749
OG
2170 continue;
2171
2172 return -EOPNOTSUPP;
2173 }
2174
5c65c564
OG
2175 if (is_tcf_mirred_egress_redirect(a)) {
2176 struct net_device *peer_dev = tcf_mirred_dev(a);
2177
2178 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
2179 same_hw_devs(priv, netdev_priv(peer_dev))) {
2180 parse_attr->mirred_ifindex = peer_dev->ifindex;
2181 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
1cab1cd7
OG
2182 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2183 MLX5_FLOW_CONTEXT_ACTION_COUNT;
5c65c564 2184 } else {
e98bedf5
EB
2185 NL_SET_ERR_MSG_MOD(extack,
2186 "device is not on same HW, can't offload");
5c65c564
OG
2187 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2188 peer_dev->name);
2189 return -EINVAL;
2190 }
2191 continue;
2192 }
2193
e3a2b7ed
AV
2194 if (is_tcf_skbedit_mark(a)) {
2195 u32 mark = tcf_skbedit_mark(a);
2196
2197 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
e98bedf5
EB
2198 NL_SET_ERR_MSG_MOD(extack,
2199 "Bad flow mark - only 16 bit is supported");
e3a2b7ed
AV
2200 return -EINVAL;
2201 }
2202
3bc4b7bf 2203 attr->flow_tag = mark;
1cab1cd7 2204 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
e3a2b7ed
AV
2205 continue;
2206 }
2207
2208 return -EINVAL;
2209 }
2210
1cab1cd7 2211 attr->action = action;
e98bedf5 2212 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
bdd66ac0
OG
2213 return -EOPNOTSUPP;
2214
e3a2b7ed
AV
2215 return 0;
2216}
2217
76f7444d
OG
2218static inline int cmp_encap_info(struct ip_tunnel_key *a,
2219 struct ip_tunnel_key *b)
a54e20b4
HHZ
2220{
2221 return memcmp(a, b, sizeof(*a));
2222}
2223
76f7444d 2224static inline int hash_encap_info(struct ip_tunnel_key *key)
a54e20b4 2225{
76f7444d 2226 return jhash(key, sizeof(*key), 0);
a54e20b4
HHZ
2227}
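/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * encap-cache pattern built on the two helpers above: hash the whole
 * tunnel key to pick a bucket, then confirm each candidate with a
 * full memcmp() so a hash collision can never alias two different
 * keys. A trivial FNV-1a hash stands in for the kernel's jhash(); the
 * key struct is hypothetical (note the explicit padding field so
 * memcmp() compares fully initialized bytes).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 64

struct tun_key { uint32_t dst_ip, src_ip; uint16_t dport, pad; };

struct encap_entry { struct tun_key key; struct encap_entry *next; };

static uint32_t hash_key(const struct tun_key *k)
{
	const uint8_t *p = (const uint8_t *)k;
	uint32_t h = 2166136261u;

	for (size_t i = 0; i < sizeof(*k); i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

static struct encap_entry *lookup(struct encap_entry *tbl[NBUCKETS],
				  const struct tun_key *k)
{
	struct encap_entry *e;

	for (e = tbl[hash_key(k) % NBUCKETS]; e; e = e->next)
		if (!memcmp(&e->key, k, sizeof(*k))) /* full key compare */
			return e;
	return NULL;
}

int main(void)
{
	static struct encap_entry *tbl[NBUCKETS];
	struct encap_entry e = { .key = { 0x0a000001, 0x0a000002, 4789, 0 } };
	struct tun_key k = e.key;

	tbl[hash_key(&e.key) % NBUCKETS] = &e;
	printf("%s\n", lookup(tbl, &k) ? "hit" : "miss"); /* hit */
	return 0;
}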
2228
2229static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
2230 struct net_device *mirred_dev,
2231 struct net_device **out_dev,
2232 struct flowi4 *fl4,
2233 struct neighbour **out_n,
6360cd62 2234 u8 *out_ttl)
a54e20b4 2235{
3e621b19 2236 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5ed99fb4 2237 struct mlx5e_rep_priv *uplink_rpriv;
a54e20b4
HHZ
2238 struct rtable *rt;
2239 struct neighbour *n = NULL;
a54e20b4
HHZ
2240
2241#if IS_ENABLED(CONFIG_INET)
abeffce9
AB
2242 int ret;
2243
a54e20b4 2244 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
abeffce9
AB
2245 ret = PTR_ERR_OR_ZERO(rt);
2246 if (ret)
2247 return ret;
a54e20b4
HHZ
2248#else
2249 return -EOPNOTSUPP;
2250#endif
a4b97ab4 2251 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3e621b19
HHZ
2252 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2253 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
5ed99fb4 2254 *out_dev = uplink_rpriv->netdev;
3e621b19
HHZ
2255 else
2256 *out_dev = rt->dst.dev;
a54e20b4 2257
6360cd62
OG
2258 if (!(*out_ttl))
2259 *out_ttl = ip4_dst_hoplimit(&rt->dst);
a54e20b4
HHZ
2260 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
2261 ip_rt_put(rt);
2262 if (!n)
2263 return -ENOMEM;
2264
2265 *out_n = n;
a54e20b4
HHZ
2266 return 0;
2267}
2268
b1d90e6b
RL
2269static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2270 struct net_device *peer_netdev)
2271{
2272 struct mlx5e_priv *peer_priv;
2273
2274 peer_priv = netdev_priv(peer_netdev);
2275
2276 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2277 (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
2278 same_hw_devs(priv, peer_priv) &&
2279 MLX5_VPORT_MANAGER(peer_priv->mdev) &&
2280 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2281}
2282
ce99f6b9
OG
2283static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
2284 struct net_device *mirred_dev,
2285 struct net_device **out_dev,
2286 struct flowi6 *fl6,
2287 struct neighbour **out_n,
6360cd62 2288 u8 *out_ttl)
ce99f6b9
OG
2289{
2290 struct neighbour *n = NULL;
2291 struct dst_entry *dst;
2292
2293#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
74bd5d56 2294 struct mlx5e_rep_priv *uplink_rpriv;
ce99f6b9
OG
2295 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2296 int ret;
2297
08820528
PB
2298 ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
2299 fl6);
2300 if (ret < 0)
ce99f6b9 2301 return ret;
ce99f6b9 2302
6360cd62
OG
2303 if (!(*out_ttl))
2304 *out_ttl = ip6_dst_hoplimit(dst);
ce99f6b9 2305
a4b97ab4 2306 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
ce99f6b9
OG
2307 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2308 if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
5ed99fb4 2309 *out_dev = uplink_rpriv->netdev;
ce99f6b9
OG
2310 else
2311 *out_dev = dst->dev;
2312#else
2313 return -EOPNOTSUPP;
2314#endif
2315
2316 n = dst_neigh_lookup(dst, &fl6->daddr);
2317 dst_release(dst);
2318 if (!n)
2319 return -ENOMEM;
2320
2321 *out_n = n;
2322 return 0;
2323}
2324
32f3671f
OG
2325static void gen_vxlan_header_ipv4(struct net_device *out_dev,
2326 char buf[], int encap_size,
2327 unsigned char h_dest[ETH_ALEN],
f35f800d 2328 u8 tos, u8 ttl,
32f3671f
OG
2329 __be32 daddr,
2330 __be32 saddr,
2331 __be16 udp_dst_port,
2332 __be32 vx_vni)
a54e20b4 2333{
a54e20b4
HHZ
2334 struct ethhdr *eth = (struct ethhdr *)buf;
2335 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
2336 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
2337 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
2338
2339 memset(buf, 0, encap_size);
2340
2341 ether_addr_copy(eth->h_dest, h_dest);
2342 ether_addr_copy(eth->h_source, out_dev->dev_addr);
2343 eth->h_proto = htons(ETH_P_IP);
2344
2345 ip->daddr = daddr;
2346 ip->saddr = saddr;
2347
f35f800d 2348 ip->tos = tos;
a54e20b4
HHZ
2349 ip->ttl = ttl;
2350 ip->protocol = IPPROTO_UDP;
2351 ip->version = 0x4;
2352 ip->ihl = 0x5;
2353
2354 udp->dest = udp_dst_port;
2355 vxh->vx_flags = VXLAN_HF_VNI;
2356 vxh->vx_vni = vxlan_vni_field(vx_vni);
a54e20b4
HHZ
2357}
2358
225aabaf
OG
2359static void gen_vxlan_header_ipv6(struct net_device *out_dev,
2360 char buf[], int encap_size,
2361 unsigned char h_dest[ETH_ALEN],
f35f800d 2362 u8 tos, u8 ttl,
225aabaf
OG
2363 struct in6_addr *daddr,
2364 struct in6_addr *saddr,
2365 __be16 udp_dst_port,
2366 __be32 vx_vni)
ce99f6b9 2367{
ce99f6b9
OG
2368 struct ethhdr *eth = (struct ethhdr *)buf;
2369 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
2370 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
2371 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
2372
2373 memset(buf, 0, encap_size);
2374
2375 ether_addr_copy(eth->h_dest, h_dest);
2376 ether_addr_copy(eth->h_source, out_dev->dev_addr);
2377 eth->h_proto = htons(ETH_P_IPV6);
2378
f35f800d 2379 ip6_flow_hdr(ip6h, tos, 0);
ce99f6b9
OG
2380 /* the HW fills in the ipv6 payload len */
2381 ip6h->nexthdr = IPPROTO_UDP;
2382 ip6h->hop_limit = ttl;
2383 ip6h->daddr = *daddr;
2384 ip6h->saddr = *saddr;
2385
2386 udp->dest = udp_dst_port;
2387 vxh->vx_flags = VXLAN_HF_VNI;
2388 vxh->vx_vni = vxlan_vni_field(vx_vni);
ce99f6b9
OG
2389}
2390
a54e20b4
HHZ
2391static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
2392 struct net_device *mirred_dev,
1a8552bd 2393 struct mlx5e_encap_entry *e)
a54e20b4
HHZ
2394{
2395 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
32f3671f 2396 int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
76f7444d 2397 struct ip_tunnel_key *tun_key = &e->tun_info.key;
1a8552bd 2398 struct net_device *out_dev;
a42485eb 2399 struct neighbour *n = NULL;
a54e20b4 2400 struct flowi4 fl4 = {};
f35f800d 2401 u8 nud_state, tos, ttl;
a54e20b4 2402 char *encap_header;
6360cd62 2403 int err;
32f3671f
OG
2404
2405 if (max_encap_size < ipv4_encap_size) {
2406 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2407 ipv4_encap_size, max_encap_size);
2408 return -EOPNOTSUPP;
2409 }
a54e20b4 2410
32f3671f 2411 encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
a54e20b4
HHZ
2412 if (!encap_header)
2413 return -ENOMEM;
2414
2415 switch (e->tunnel_type) {
60786f09 2416 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
a54e20b4 2417 fl4.flowi4_proto = IPPROTO_UDP;
76f7444d 2418 fl4.fl4_dport = tun_key->tp_dst;
a54e20b4
HHZ
2419 break;
2420 default:
2421 err = -EOPNOTSUPP;
ace74321 2422 goto free_encap;
a54e20b4 2423 }
6360cd62 2424
f35f800d
OG
2425 tos = tun_key->tos;
2426 ttl = tun_key->ttl;
6360cd62 2427
9a941117 2428 fl4.flowi4_tos = tun_key->tos;
76f7444d 2429 fl4.daddr = tun_key->u.ipv4.dst;
9a941117 2430 fl4.saddr = tun_key->u.ipv4.src;
a54e20b4 2431
1a8552bd 2432 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
9a941117 2433 &fl4, &n, &ttl);
a54e20b4 2434 if (err)
ace74321 2435 goto free_encap;
a54e20b4 2436
232c0013
HHZ
2437 /* used by mlx5e_detach_encap to look up the neigh entry in the
2438 * neigh hash table when a user deletes a rule
2439 */
2440 e->m_neigh.dev = n->dev;
f6dfb4c3 2441 e->m_neigh.family = n->ops->family;
232c0013
HHZ
2442 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2443 e->out_dev = out_dev;
2444
2445 /* It's important to add the neigh to the hash table before checking
2446 * its validity state, so that if we get a notification when the neigh
2447 * changes its validity state, we will find the relevant neigh in the
2448 * hash.
2449 */
2450 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2451 if (err)
ace74321 2452 goto free_encap;
232c0013 2453
033354d5
HHZ
2454 read_lock_bh(&n->lock);
2455 nud_state = n->nud_state;
2456 ether_addr_copy(e->h_dest, n->ha);
2457 read_unlock_bh(&n->lock);
2458
a54e20b4 2459 switch (e->tunnel_type) {
60786f09 2460 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1a8552bd 2461 gen_vxlan_header_ipv4(out_dev, encap_header,
f35f800d 2462 ipv4_encap_size, e->h_dest, tos, ttl,
32f3671f
OG
2463 fl4.daddr,
2464 fl4.saddr, tun_key->tp_dst,
2465 tunnel_id_to_key32(tun_key->tun_id));
a54e20b4
HHZ
2466 break;
2467 default:
2468 err = -EOPNOTSUPP;
232c0013
HHZ
2469 goto destroy_neigh_entry;
2470 }
2471 e->encap_size = ipv4_encap_size;
2472 e->encap_header = encap_header;
2473
2474 if (!(nud_state & NUD_VALID)) {
2475 neigh_event_send(n, NULL);
27902f08
WY
2476 err = -EAGAIN;
2477 goto out;
a54e20b4
HHZ
2478 }
2479
60786f09
MB
2480 err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
2481 ipv4_encap_size, encap_header,
31ca3648 2482 MLX5_FLOW_NAMESPACE_FDB,
60786f09 2483 &e->encap_id);
232c0013
HHZ
2484 if (err)
2485 goto destroy_neigh_entry;
2486
2487 e->flags |= MLX5_ENCAP_ENTRY_VALID;
f6dfb4c3 2488 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
232c0013
HHZ
2489 neigh_release(n);
2490 return err;
2491
2492destroy_neigh_entry:
2493 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
ace74321 2494free_encap:
a54e20b4 2495 kfree(encap_header);
ace74321 2496out:
232c0013
HHZ
2497 if (n)
2498 neigh_release(n);
a54e20b4
HHZ
2499 return err;
2500}
2501
ce99f6b9
OG
2502static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
2503 struct net_device *mirred_dev,
1a8552bd 2504 struct mlx5e_encap_entry *e)
ce99f6b9
OG
2505{
2506 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
225aabaf 2507 int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
ce99f6b9 2508 struct ip_tunnel_key *tun_key = &e->tun_info.key;
1a8552bd 2509 struct net_device *out_dev;
ce99f6b9
OG
2510 struct neighbour *n = NULL;
2511 struct flowi6 fl6 = {};
f35f800d 2512 u8 nud_state, tos, ttl;
ce99f6b9 2513 char *encap_header;
6360cd62 2514 int err;
ce99f6b9 2515
225aabaf
OG
2516 if (max_encap_size < ipv6_encap_size) {
2517 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2518 ipv6_encap_size, max_encap_size);
2519 return -EOPNOTSUPP;
2520 }
ce99f6b9 2521
225aabaf 2522 encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
ce99f6b9
OG
2523 if (!encap_header)
2524 return -ENOMEM;
2525
2526 switch (e->tunnel_type) {
60786f09 2527 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
ce99f6b9
OG
2528 fl6.flowi6_proto = IPPROTO_UDP;
2529 fl6.fl6_dport = tun_key->tp_dst;
2530 break;
2531 default:
2532 err = -EOPNOTSUPP;
ace74321 2533 goto free_encap;
ce99f6b9
OG
2534 }
2535
f35f800d
OG
2536 tos = tun_key->tos;
2537 ttl = tun_key->ttl;
6360cd62 2538
ce99f6b9
OG
2539 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
2540 fl6.daddr = tun_key->u.ipv6.dst;
2541 fl6.saddr = tun_key->u.ipv6.src;
2542
1a8552bd 2543 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
ce99f6b9
OG
2544 &fl6, &n, &ttl);
2545 if (err)
ace74321 2546 goto free_encap;
ce99f6b9 2547
232c0013
HHZ
2548 /* used by mlx5e_detach_encap to look up the neigh entry in the
2549 * neigh hash table when a user deletes a rule
2550 */
2551 e->m_neigh.dev = n->dev;
f6dfb4c3 2552 e->m_neigh.family = n->ops->family;
232c0013
HHZ
2553 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2554 e->out_dev = out_dev;
2555
2556 /* It's important to add the neigh to the hash table before checking
2557 * its validity state, so that if we get a notification when the neigh
2558 * changes its validity state, we will find the relevant neigh in the
2559 * hash.
2560 */
2561 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2562 if (err)
ace74321 2563 goto free_encap;
232c0013 2564
033354d5
HHZ
2565 read_lock_bh(&n->lock);
2566 nud_state = n->nud_state;
2567 ether_addr_copy(e->h_dest, n->ha);
2568 read_unlock_bh(&n->lock);
2569
ce99f6b9 2570 switch (e->tunnel_type) {
60786f09 2571 case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
1a8552bd 2572 gen_vxlan_header_ipv6(out_dev, encap_header,
f35f800d 2573 ipv6_encap_size, e->h_dest, tos, ttl,
225aabaf
OG
2574 &fl6.daddr,
2575 &fl6.saddr, tun_key->tp_dst,
2576 tunnel_id_to_key32(tun_key->tun_id));
ce99f6b9
OG
2577 break;
2578 default:
2579 err = -EOPNOTSUPP;
232c0013
HHZ
2580 goto destroy_neigh_entry;
2581 }
2582
2583 e->encap_size = ipv6_encap_size;
2584 e->encap_header = encap_header;
2585
2586 if (!(nud_state & NUD_VALID)) {
2587 neigh_event_send(n, NULL);
27902f08
WY
2588 err = -EAGAIN;
2589 goto out;
ce99f6b9
OG
2590 }
2591
60786f09
MB
2592 err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
2593 ipv6_encap_size, encap_header,
31ca3648 2594 MLX5_FLOW_NAMESPACE_FDB,
60786f09 2595 &e->encap_id);
232c0013
HHZ
2596 if (err)
2597 goto destroy_neigh_entry;
2598
2599 e->flags |= MLX5_ENCAP_ENTRY_VALID;
f6dfb4c3 2600 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
232c0013
HHZ
2601 neigh_release(n);
2602 return err;
2603
2604destroy_neigh_entry:
2605 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
ace74321 2606free_encap:
ce99f6b9 2607 kfree(encap_header);
ace74321 2608out:
232c0013
HHZ
2609 if (n)
2610 neigh_release(n);
ce99f6b9
OG
2611 return err;
2612}
2613
a54e20b4
HHZ
2614static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2615 struct ip_tunnel_info *tun_info,
2616 struct net_device *mirred_dev,
45247bf2 2617 struct net_device **encap_dev,
e98bedf5
EB
2618 struct mlx5e_tc_flow *flow,
2619 struct netlink_ext_ack *extack)
a54e20b4
HHZ
2620{
2621 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2622 unsigned short family = ip_tunnel_info_af(tun_info);
45247bf2 2623 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
a54e20b4 2624 struct ip_tunnel_key *key = &tun_info->key;
c1ae1152 2625 struct mlx5e_encap_entry *e;
45247bf2 2626 int tunnel_type, err = 0;
a54e20b4
HHZ
2627 uintptr_t hash_key;
2628 bool found = false;
a54e20b4 2629
2fcd82e9 2630 /* udp dst port must be set */
a54e20b4 2631 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
2fcd82e9 2632 goto vxlan_encap_offload_err;
a54e20b4 2633
cd377663 2634 /* setting udp src port isn't supported */
2fcd82e9
OG
2635 if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
2636vxlan_encap_offload_err:
e98bedf5
EB
2637 NL_SET_ERR_MSG_MOD(extack,
2638 "must set udp dst port and not set udp src port");
2fcd82e9
OG
2639 netdev_warn(priv->netdev,
2640 "must set udp dst port and not set udp src port\n");
cd377663 2641 return -EOPNOTSUPP;
2fcd82e9 2642 }
cd377663 2643
a3e67366 2644 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
a54e20b4 2645 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
60786f09 2646 tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
a54e20b4 2647 } else {
e98bedf5
EB
2648 NL_SET_ERR_MSG_MOD(extack,
2649 "port isn't an offloaded vxlan udp dport");
2fcd82e9
OG
2650 netdev_warn(priv->netdev,
2651 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
a54e20b4
HHZ
2652 return -EOPNOTSUPP;
2653 }
2654
76f7444d 2655 hash_key = hash_encap_info(key);
a54e20b4
HHZ
2656
2657 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2658 encap_hlist, hash_key) {
76f7444d 2659 if (!cmp_encap_info(&e->tun_info.key, key)) {
a54e20b4
HHZ
2660 found = true;
2661 break;
2662 }
2663 }
2664
b2812089 2665 /* must verify if encap is valid or not */
45247bf2
OG
2666 if (found)
2667 goto attach_flow;
a54e20b4
HHZ
2668
2669 e = kzalloc(sizeof(*e), GFP_KERNEL);
2670 if (!e)
2671 return -ENOMEM;
2672
76f7444d 2673 e->tun_info = *tun_info;
a54e20b4
HHZ
2674 e->tunnel_type = tunnel_type;
2675 INIT_LIST_HEAD(&e->flows);
2676
ce99f6b9 2677 if (family == AF_INET)
1a8552bd 2678 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
ce99f6b9 2679 else if (family == AF_INET6)
1a8552bd 2680 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
ce99f6b9 2681
232c0013 2682 if (err && err != -EAGAIN)
a54e20b4
HHZ
2683 goto out_err;
2684
a54e20b4
HHZ
2685 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2686
45247bf2
OG
2687attach_flow:
2688 list_add(&flow->encap, &e->flows);
2689 *encap_dev = e->out_dev;
232c0013
HHZ
2690 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
2691 attr->encap_id = e->encap_id;
b2812089
VB
2692 else
2693 err = -EAGAIN;
45247bf2 2694
232c0013 2695 return err;
a54e20b4
HHZ
2696
2697out_err:
2698 kfree(e);
2699 return err;
2700}
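/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * lookup-or-create flow in mlx5e_attach_encap() above: find the encap
 * entry by key and just link the flow to it when it already exists,
 * otherwise create it; an entry whose neighbour is not resolved yet
 * surfaces as -EAGAIN so the caller keeps the flow cached but
 * unoffloaded until the entry turns valid. All names and the fixed
 * array cache are hypothetical.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct encap { int key; bool valid; int nflows; };

static struct encap cache[8];
static int ncache;

static struct encap *find_encap(int key)
{
	for (int i = 0; i < ncache; i++)
		if (cache[i].key == key)
			return &cache[i];
	return NULL;
}

/* 0: flow can be offloaded now; -EAGAIN: wait for neigh resolution */
static int attach_encap(int key, bool neigh_resolved)
{
	struct encap *e = find_encap(key);

	if (!e) {
		e = &cache[ncache++];
		e->key = key;
		/* the real code resolves a route and neighbour here */
		e->valid = neigh_resolved;
	}
	e->nflows++;
	return e->valid ? 0 : -EAGAIN;
}

int main(void)
{
	printf("%d\n", attach_encap(1, false)); /* -EAGAIN: unresolved */
	printf("%d\n", attach_encap(1, true));	/* still -EAGAIN: shared entry */
	printf("%d\n", attach_encap(2, true));	/* 0: valid from the start */
	return 0;
}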
2701
1482bd3d
JL
2702static int parse_tc_vlan_action(struct mlx5e_priv *priv,
2703 const struct tc_action *a,
2704 struct mlx5_esw_flow_attr *attr,
2705 u32 *action)
2706{
cc495188
JL
2707 u8 vlan_idx = attr->total_vlan;
2708
2709 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
2710 return -EOPNOTSUPP;
2711
1482bd3d 2712 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
cc495188
JL
2713 if (vlan_idx) {
2714 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2715 MLX5_FS_VLAN_DEPTH))
2716 return -EOPNOTSUPP;
2717
2718 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
2719 } else {
2720 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2721 }
1482bd3d 2722 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
cc495188
JL
2723 attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
2724 attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
2725 attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
2726 if (!attr->vlan_proto[vlan_idx])
2727 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
2728
2729 if (vlan_idx) {
2730 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2731 MLX5_FS_VLAN_DEPTH))
2732 return -EOPNOTSUPP;
2733
2734 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
2735 } else {
2736 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
2737 (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2738 tcf_vlan_push_prio(a)))
2739 return -EOPNOTSUPP;
2740
2741 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1482bd3d
JL
2742 }
2743 } else { /* action is TCA_VLAN_ACT_MODIFY */
2744 return -EOPNOTSUPP;
2745 }
2746
cc495188
JL
2747 attr->total_vlan = vlan_idx + 1;
2748
1482bd3d
JL
2749 return 0;
2750}
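/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * two-slot VLAN accounting in parse_tc_vlan_action() above: the first
 * push/pop uses the base action bit, the second uses the *_2 bit, and
 * anything beyond MLX5_FS_VLAN_DEPTH (two tags) is rejected. Flag
 * values are hypothetical.
 */
#include <stdio.h>

#define VLAN_DEPTH 2

#define ACT_VLAN_PUSH	(1u << 0)
#define ACT_VLAN_PUSH_2 (1u << 1)

static int add_vlan_push(unsigned int *action, unsigned int *nvlans)
{
	if (*nvlans >= VLAN_DEPTH)
		return -1; /* the driver returns -EOPNOTSUPP here */

	*action |= *nvlans ? ACT_VLAN_PUSH_2 : ACT_VLAN_PUSH;
	(*nvlans)++;
	return 0;
}

int main(void)
{
	unsigned int action = 0, nvlans = 0;

	printf("%d\n", add_vlan_push(&action, &nvlans)); /* 0: PUSH */
	printf("%d\n", add_vlan_push(&action, &nvlans)); /* 0: PUSH_2 */
	printf("%d\n", add_vlan_push(&action, &nvlans)); /* -1: too deep */
	printf("action mask: %#x\n", action);		 /* 0x3 */
	return 0;
}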
2751
03a9d11e 2752static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
d7e75a32 2753 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2754 struct mlx5e_tc_flow *flow,
2755 struct netlink_ext_ack *extack)
03a9d11e 2756{
ecf5bb79 2757 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1d447a39 2758 struct mlx5e_rep_priv *rpriv = priv->ppriv;
a54e20b4 2759 struct ip_tunnel_info *info = NULL;
03a9d11e 2760 const struct tc_action *a;
22dc13c8 2761 LIST_HEAD(actions);
a54e20b4 2762 bool encap = false;
1cab1cd7 2763 u32 action = 0;
244cd96a 2764 int err, i;
03a9d11e 2765
3bcc0cec 2766 if (!tcf_exts_has_actions(exts))
03a9d11e
OG
2767 return -EINVAL;
2768
1d447a39 2769 attr->in_rep = rpriv->rep;
10ff5359 2770 attr->in_mdev = priv->mdev;
03a9d11e 2771
244cd96a 2772 tcf_exts_for_each_action(i, a, exts) {
03a9d11e 2773 if (is_tcf_gact_shot(a)) {
1cab1cd7
OG
2774 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2775 MLX5_FLOW_CONTEXT_ACTION_COUNT;
03a9d11e
OG
2776 continue;
2777 }
2778
d7e75a32
OG
2779 if (is_tcf_pedit(a)) {
2780 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
e98bedf5 2781 parse_attr, extack);
d7e75a32
OG
2782 if (err)
2783 return err;
2784
1cab1cd7 2785 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
592d3651 2786 attr->mirror_count = attr->out_count;
d7e75a32
OG
2787 continue;
2788 }
2789
26c02749 2790 if (is_tcf_csum(a)) {
1cab1cd7 2791 if (csum_offload_supported(priv, action,
e98bedf5
EB
2792 tcf_csum_update_flags(a),
2793 extack))
26c02749
OG
2794 continue;
2795
2796 return -EOPNOTSUPP;
2797 }
2798
592d3651 2799 if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
03a9d11e 2800 struct mlx5e_priv *out_priv;
592d3651 2801 struct net_device *out_dev;
03a9d11e 2802
9f8a739e 2803 out_dev = tcf_mirred_dev(a);
03a9d11e 2804
592d3651 2805 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
e98bedf5
EB
2806 NL_SET_ERR_MSG_MOD(extack,
2807 "can't support more output ports, can't offload forwarding");
592d3651
CM
2808 pr_err("can't support more than %d output ports, can't offload forwarding\n",
2809 attr->out_count);
2810 return -EOPNOTSUPP;
2811 }
2812
a54e20b4 2813 if (switchdev_port_same_parent_id(priv->netdev,
b1d90e6b
RL
2814 out_dev) ||
2815 is_merged_eswitch_dev(priv, out_dev)) {
1cab1cd7
OG
2816 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2817 MLX5_FLOW_CONTEXT_ACTION_COUNT;
a54e20b4 2818 out_priv = netdev_priv(out_dev);
1d447a39 2819 rpriv = out_priv->ppriv;
592d3651
CM
2820 attr->out_rep[attr->out_count] = rpriv->rep;
2821 attr->out_mdev[attr->out_count++] = out_priv->mdev;
a54e20b4 2822 } else if (encap) {
9f8a739e 2823 parse_attr->mirred_ifindex = out_dev->ifindex;
3c37745e
OG
2824 parse_attr->tun_info = *info;
2825 attr->parse_attr = parse_attr;
60786f09 2826 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1cab1cd7
OG
2827 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2828 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3c37745e 2829 /* attr->out_rep is resolved when we handle encap */
a54e20b4 2830 } else {
e98bedf5
EB
2831 NL_SET_ERR_MSG_MOD(extack,
2832 "devices are not on same switch HW, can't offload forwarding");
03a9d11e
OG
2833 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2834 priv->netdev->name, out_dev->name);
2835 return -EINVAL;
2836 }
a54e20b4
HHZ
2837 continue;
2838 }
03a9d11e 2839
a54e20b4
HHZ
2840 if (is_tcf_tunnel_set(a)) {
2841 info = tcf_tunnel_info(a);
2842 if (info)
2843 encap = true;
2844 else
2845 return -EOPNOTSUPP;
592d3651 2846 attr->mirror_count = attr->out_count;
03a9d11e
OG
2847 continue;
2848 }
2849
8b32580d 2850 if (is_tcf_vlan(a)) {
1482bd3d
JL
2851 err = parse_tc_vlan_action(priv, a, attr, &action);
2852
2853 if (err)
2854 return err;
2855
592d3651 2856 attr->mirror_count = attr->out_count;
8b32580d
OG
2857 continue;
2858 }
2859
bbd00f7e 2860 if (is_tcf_tunnel_release(a)) {
1cab1cd7 2861 action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
bbd00f7e
HHZ
2862 continue;
2863 }
2864
03a9d11e
OG
2865 return -EINVAL;
2866 }
bdd66ac0 2867
1cab1cd7 2868 attr->action = action;
e98bedf5 2869 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
bdd66ac0
OG
2870 return -EOPNOTSUPP;
2871
592d3651 2872 if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
e98bedf5
EB
2873 NL_SET_ERR_MSG_MOD(extack,
2874 "current firmware doesn't support split rule for port mirroring");
592d3651
CM
2875 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
2876 return -EOPNOTSUPP;
2877 }
2878
31c8eba5 2879 return 0;
03a9d11e
OG
2880}
2881
60bd4af8
OG
2882static void get_flags(int flags, u8 *flow_flags)
2883{
2884 u8 __flow_flags = 0;
2885
2886 if (flags & MLX5E_TC_INGRESS)
2887 __flow_flags |= MLX5E_TC_FLOW_INGRESS;
2888 if (flags & MLX5E_TC_EGRESS)
2889 __flow_flags |= MLX5E_TC_FLOW_EGRESS;
2890
2891 *flow_flags = __flow_flags;
2892}
2893
05866c82
OG
2894static const struct rhashtable_params tc_ht_params = {
2895 .head_offset = offsetof(struct mlx5e_tc_flow, node),
2896 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2897 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2898 .automatic_shrinking = true,
2899};
2900
2901static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
2902{
655dc3d2
OG
2903 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2904 struct mlx5e_rep_priv *uplink_rpriv;
2905
2906 if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
2907 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2908 return &uplink_rpriv->tc_ht;
2909 } else
2910 return &priv->fs.tc.ht;
05866c82
OG
2911}
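/*
 * Illustrative sketch (userspace C, not part of this driver) of the
 * idea behind tc_ht_params above: a generic table is told where the
 * key lives inside the object via offsetof()/sizeof(), so it can hash
 * and compare flows by cookie without knowing the struct layout. The
 * params struct and helper are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow { uint64_t cookie; int data; };

struct ht_params { size_t key_offset, key_len; };

static int key_matches(const void *obj, const void *key,
		       const struct ht_params *p)
{
	return !memcmp((const char *)obj + p->key_offset, key, p->key_len);
}

int main(void)
{
	const struct ht_params params = {
		.key_offset = offsetof(struct flow, cookie),
		.key_len = sizeof(((struct flow *)0)->cookie),
	};
	struct flow f = { .cookie = 0xdeadbeefULL, .data = 1 };
	uint64_t cookie = 0xdeadbeefULL;

	printf("%d\n", key_matches(&f, &cookie, &params)); /* 1: match */
	return 0;
}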
2912
a88780a9
RD
2913static int
2914mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
2915 struct tc_cls_flower_offload *f, u8 flow_flags,
2916 struct mlx5e_tc_flow_parse_attr **__parse_attr,
2917 struct mlx5e_tc_flow **__flow)
e3a2b7ed 2918{
17091853 2919 struct mlx5e_tc_flow_parse_attr *parse_attr;
3bc4b7bf 2920 struct mlx5e_tc_flow *flow;
a88780a9 2921 int err;
e3a2b7ed 2922
65ba8fb7 2923 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
1b9a07ee 2924 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
17091853 2925 if (!parse_attr || !flow) {
e3a2b7ed
AV
2926 err = -ENOMEM;
2927 goto err_free;
2928 }
2929
2930 flow->cookie = f->cookie;
65ba8fb7 2931 flow->flags = flow_flags;
655dc3d2 2932 flow->priv = priv;
e3a2b7ed 2933
17091853 2934 err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
a88780a9 2935 if (err)
e3a2b7ed
AV
2936 goto err_free;
2937
a88780a9
RD
2938 *__flow = flow;
2939 *__parse_attr = parse_attr;
2940
2941 return 0;
2942
2943err_free:
2944 kfree(flow);
2945 kvfree(parse_attr);
2946 return err;
2947}
2948
2949static int
2950mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2951 struct tc_cls_flower_offload *f,
2952 u8 flow_flags,
2953 struct mlx5e_tc_flow **__flow)
2954{
2955 struct netlink_ext_ack *extack = f->common.extack;
2956 struct mlx5e_tc_flow_parse_attr *parse_attr;
2957 struct mlx5e_tc_flow *flow;
2958 int attr_size, err;
e3a2b7ed 2959
a88780a9
RD
2960 flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2961 attr_size = sizeof(struct mlx5_esw_flow_attr);
2962 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2963 &parse_attr, &flow);
2964 if (err)
2965 goto out;
2966
2967 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
2968 if (err)
2969 goto err_free;
2970
2971 err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
c83954ab
RL
2972 if (err && err != -EAGAIN)
2973 goto err_free;
e3a2b7ed 2974
a88780a9 2975 if (!err)
3c37745e
OG
2976 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2977
a88780a9 2978 if (!(flow->esw_attr->action &
60786f09 2979 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
af1607c3
JL
2980 kvfree(parse_attr);
2981
a88780a9 2982 *__flow = flow;
5c40348c 2983
a88780a9
RD
2984 return 0;
2985
2986err_free:
2987 kfree(flow);
2988 kvfree(parse_attr);
2989out:
232c0013 2990 return err;
a88780a9
RD
2991}
2992
2993static int
2994mlx5e_add_nic_flow(struct mlx5e_priv *priv,
2995 struct tc_cls_flower_offload *f,
2996 u8 flow_flags,
2997 struct mlx5e_tc_flow **__flow)
2998{
2999 struct netlink_ext_ack *extack = f->common.extack;
3000 struct mlx5e_tc_flow_parse_attr *parse_attr;
3001 struct mlx5e_tc_flow *flow;
3002 int attr_size, err;
3003
3004 flow_flags |= MLX5E_TC_FLOW_NIC;
3005 attr_size = sizeof(struct mlx5_nic_flow_attr);
3006 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3007 &parse_attr, &flow);
3008 if (err)
3009 goto out;
3010
3011 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
3012 if (err)
3013 goto err_free;
3014
3015 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
3016 if (err)
3017 goto err_free;
3018
3019 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
3020 kvfree(parse_attr);
3021 *__flow = flow;
3022
3023 return 0;
e3a2b7ed 3024
e3a2b7ed 3025err_free:
a88780a9 3026 kfree(flow);
17091853 3027 kvfree(parse_attr);
a88780a9
RD
3028out:
3029 return err;
3030}
3031
3032static int
3033mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3034 struct tc_cls_flower_offload *f,
3035 int flags,
3036 struct mlx5e_tc_flow **flow)
3037{
3038 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3039 u8 flow_flags;
3040 int err;
3041
3042 get_flags(flags, &flow_flags);
3043
3044 if (esw && esw->mode == SRIOV_OFFLOADS)
3045 err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
3046 else
3047 err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
3048
3049 return err;
3050}
3051
3052int mlx5e_configure_flower(struct mlx5e_priv *priv,
3053 struct tc_cls_flower_offload *f, int flags)
3054{
3055 struct netlink_ext_ack *extack = f->common.extack;
3056 struct rhashtable *tc_ht = get_tc_ht(priv);
3057 struct mlx5e_tc_flow *flow;
3058 int err = 0;
3059
3060 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
3061 if (flow) {
3062 NL_SET_ERR_MSG_MOD(extack,
3063 "flow cookie already exists, ignoring");
3064 netdev_warn_once(priv->netdev,
3065 "flow cookie %lx already exists, ignoring\n",
3066 f->cookie);
3067 goto out;
3068 }
3069
3070 err = mlx5e_tc_add_flow(priv, f, flags, &flow);
3071 if (err)
3072 goto out;
3073
3074 err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
3075 if (err)
3076 goto err_free;
3077
3078 return 0;
3079
3080err_free:
3081 mlx5e_tc_del_flow(priv, flow);
232c0013 3082 kfree(flow);
a88780a9 3083out:
e3a2b7ed
AV
3084 return err;
3085}
3086
8f8ae895
OG
3087#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
3088#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
3089
3090static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
3091{
3092 if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
3093 return true;
3094
3095 return false;
3096}
3097
e3a2b7ed 3098int mlx5e_delete_flower(struct mlx5e_priv *priv,
60bd4af8 3099 struct tc_cls_flower_offload *f, int flags)
e3a2b7ed 3100{
05866c82 3101 struct rhashtable *tc_ht = get_tc_ht(priv);
e3a2b7ed 3102 struct mlx5e_tc_flow *flow;
e3a2b7ed 3103
05866c82 3104 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 3105 if (!flow || !same_flow_direction(flow, flags))
e3a2b7ed
AV
3106 return -EINVAL;
3107
05866c82 3108 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
e3a2b7ed 3109
961e8979 3110 mlx5e_tc_del_flow(priv, flow);
e3a2b7ed
AV
3111
3112 kfree(flow);
3113
3114 return 0;
3115}
3116
aad7e08d 3117int mlx5e_stats_flower(struct mlx5e_priv *priv,
60bd4af8 3118 struct tc_cls_flower_offload *f, int flags)
aad7e08d 3119{
05866c82 3120 struct rhashtable *tc_ht = get_tc_ht(priv);
aad7e08d 3121 struct mlx5e_tc_flow *flow;
aad7e08d
AV
3122 struct mlx5_fc *counter;
3123 u64 bytes;
3124 u64 packets;
3125 u64 lastuse;
3126
05866c82 3127 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 3128 if (!flow || !same_flow_direction(flow, flags))
aad7e08d
AV
3129 return -EINVAL;
3130
0b67a38f
HHZ
3131 if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
3132 return 0;
3133
b8aee822 3134 counter = mlx5e_tc_get_counter(flow);
aad7e08d
AV
3135 if (!counter)
3136 return 0;
3137
3138 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3139
d897a638 3140 tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
fed06ee8 3141
aad7e08d
AV
3142 return 0;
3143}
3144
4d8fcf21
AH
3145static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
3146 struct mlx5e_priv *peer_priv)
3147{
3148 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
3149 struct mlx5e_hairpin_entry *hpe;
3150 u16 peer_vhca_id;
3151 int bkt;
3152
3153 if (!same_hw_devs(priv, peer_priv))
3154 return;
3155
3156 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
3157
3158 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
3159 if (hpe->peer_vhca_id == peer_vhca_id)
3160 hpe->hp->pair->peer_gone = true;
3161 }
3162}
3163
3164static int mlx5e_tc_netdev_event(struct notifier_block *this,
3165 unsigned long event, void *ptr)
3166{
3167 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3168 struct mlx5e_flow_steering *fs;
3169 struct mlx5e_priv *peer_priv;
3170 struct mlx5e_tc_table *tc;
3171 struct mlx5e_priv *priv;
3172
3173 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
3174 event != NETDEV_UNREGISTER ||
3175 ndev->reg_state == NETREG_REGISTERED)
3176 return NOTIFY_DONE;
3177
3178 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
3179 fs = container_of(tc, struct mlx5e_flow_steering, tc);
3180 priv = container_of(fs, struct mlx5e_priv, fs);
3181 peer_priv = netdev_priv(ndev);
3182 if (priv == peer_priv ||
3183 !(priv->netdev->features & NETIF_F_HW_TC))
3184 return NOTIFY_DONE;
3185
3186 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
3187
3188 return NOTIFY_DONE;
3189}
3190
655dc3d2 3191int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
e8f887ac 3192{
acff797c 3193 struct mlx5e_tc_table *tc = &priv->fs.tc;
4d8fcf21 3194 int err;
e8f887ac 3195
11c9c548 3196 hash_init(tc->mod_hdr_tbl);
5c65c564 3197 hash_init(tc->hairpin_tbl);
11c9c548 3198
4d8fcf21
AH
3199 err = rhashtable_init(&tc->ht, &tc_ht_params);
3200 if (err)
3201 return err;
3202
3203 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
3204 if (register_netdevice_notifier(&tc->netdevice_nb)) {
3205 tc->netdevice_nb.notifier_call = NULL;
3206 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
3207 }
3208
3209 return err;
e8f887ac
AV
3210}
3211
3212static void _mlx5e_tc_del_flow(void *ptr, void *arg)
3213{
3214 struct mlx5e_tc_flow *flow = ptr;
655dc3d2 3215 struct mlx5e_priv *priv = flow->priv;
e8f887ac 3216
961e8979 3217 mlx5e_tc_del_flow(priv, flow);
e8f887ac
AV
3218 kfree(flow);
3219}
3220
655dc3d2 3221void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
e8f887ac 3222{
acff797c 3223 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac 3224
4d8fcf21
AH
3225 if (tc->netdevice_nb.notifier_call)
3226 unregister_netdevice_notifier(&tc->netdevice_nb);
3227
655dc3d2 3228 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
e8f887ac 3229
acff797c
MG
3230 if (!IS_ERR_OR_NULL(tc->t)) {
3231 mlx5_destroy_flow_table(tc->t);
3232 tc->t = NULL;
e8f887ac
AV
3233 }
3234}
655dc3d2
OG
3235
3236int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
3237{
3238 return rhashtable_init(tc_ht, &tc_ht_params);
3239}
3240
3241void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
3242{
3243 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
3244}
01252a27
OG
3245
3246int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
3247{
3248 struct rhashtable *tc_ht = get_tc_ht(priv);
3249
3250 return atomic_read(&tc_ht->nelems);
3251}