net/mlx5: E-Switch, Rename esw attr mirror count field
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
CommitLineData
e8f887ac
AV
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
e3a2b7ed 33#include <net/flow_dissector.h>
3f7d0eb4 34#include <net/sch_generic.h>
e3a2b7ed
AV
35#include <net/pkt_cls.h>
36#include <net/tc_act/tc_gact.h>
12185a9f 37#include <net/tc_act/tc_skbedit.h>
e8f887ac
AV
38#include <linux/mlx5/fs.h>
39#include <linux/mlx5/device.h>
40#include <linux/rhashtable.h>
03a9d11e
OG
41#include <net/switchdev.h>
42#include <net/tc_act/tc_mirred.h>
776b12b6 43#include <net/tc_act/tc_vlan.h>
bbd00f7e 44#include <net/tc_act/tc_tunnel_key.h>
d79b6df6 45#include <net/tc_act/tc_pedit.h>
26c02749 46#include <net/tc_act/tc_csum.h>
f6dfb4c3 47#include <net/arp.h>
e8f887ac 48#include "en.h"
1d447a39 49#include "en_rep.h"
232c0013 50#include "en_tc.h"
03a9d11e 51#include "eswitch.h"
3f6d08d1 52#include "fs_core.h"
2c81bfd5 53#include "en/port.h"
101f4de9 54#include "en/tc_tun.h"
e8f887ac 55
3bc4b7bf
OG
56struct mlx5_nic_flow_attr {
57 u32 action;
58 u32 flow_tag;
2f4fe4ca 59 u32 mod_hdr_id;
5c65c564 60 u32 hairpin_tirn;
38aa51c1 61 u8 match_level;
3f6d08d1 62 struct mlx5_flow_table *hairpin_ft;
b8aee822 63 struct mlx5_fc *counter;
3bc4b7bf
OG
64};
65
60bd4af8
OG
66#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
67
65ba8fb7 68enum {
60bd4af8
OG
69 MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
70 MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
71 MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
72 MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
73 MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
74 MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
75 MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
5dbe906f 76 MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
65ba8fb7
OG
77};
78
e4ad91f2
CM
79#define MLX5E_TC_MAX_SPLITS 1
80
e8f887ac
AV
81struct mlx5e_tc_flow {
82 struct rhash_head node;
655dc3d2 83 struct mlx5e_priv *priv;
e8f887ac 84 u64 cookie;
5dbe906f 85 u16 flags;
e4ad91f2 86 struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
11c9c548
OG
87 struct list_head encap; /* flows sharing the same encap ID */
88 struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
5c65c564 89 struct list_head hairpin; /* flows sharing the same hairpin */
3bc4b7bf
OG
90 union {
91 struct mlx5_esw_flow_attr esw_attr[0];
92 struct mlx5_nic_flow_attr nic_attr[0];
93 };
e8f887ac
AV
94};
95
17091853 96struct mlx5e_tc_flow_parse_attr {
3c37745e 97 struct ip_tunnel_info tun_info;
d11afc26 98 struct net_device *filter_dev;
17091853 99 struct mlx5_flow_spec spec;
d79b6df6
OG
100 int num_mod_hdr_actions;
101 void *mod_hdr_actions;
3c37745e 102 int mirred_ifindex;
17091853
OG
103};
104
acff797c 105#define MLX5E_TC_TABLE_NUM_GROUPS 4
b3a433de 106#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
e8f887ac 107
77ab67b7
OG
108struct mlx5e_hairpin {
109 struct mlx5_hairpin *pair;
110
111 struct mlx5_core_dev *func_mdev;
3f6d08d1 112 struct mlx5e_priv *func_priv;
77ab67b7
OG
113 u32 tdn;
114 u32 tirn;
3f6d08d1
OG
115
116 int num_channels;
117 struct mlx5e_rqt indir_rqt;
118 u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
119 struct mlx5e_ttc_table ttc;
77ab67b7
OG
120};
121
5c65c564
OG
122struct mlx5e_hairpin_entry {
123 /* a node of a hash table which keeps all the hairpin entries */
124 struct hlist_node hairpin_hlist;
125
126 /* flows sharing the same hairpin */
127 struct list_head flows;
128
d8822868 129 u16 peer_vhca_id;
106be53b 130 u8 prio;
5c65c564
OG
131 struct mlx5e_hairpin *hp;
132};
133
11c9c548
OG
134struct mod_hdr_key {
135 int num_actions;
136 void *actions;
137};
138
139struct mlx5e_mod_hdr_entry {
140 /* a node of a hash table which keeps all the mod_hdr entries */
141 struct hlist_node mod_hdr_hlist;
142
143 /* flows sharing the same mod_hdr entry */
144 struct list_head flows;
145
146 struct mod_hdr_key key;
147
148 u32 mod_hdr_id;
149};
150
151#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
152
153static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
154{
155 return jhash(key->actions,
156 key->num_actions * MLX5_MH_ACT_SZ, 0);
157}
158
159static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
160 struct mod_hdr_key *b)
161{
162 if (a->num_actions != b->num_actions)
163 return 1;
164
165 return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
166}
167
168static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
169 struct mlx5e_tc_flow *flow,
170 struct mlx5e_tc_flow_parse_attr *parse_attr)
171{
172 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
173 int num_actions, actions_size, namespace, err;
174 struct mlx5e_mod_hdr_entry *mh;
175 struct mod_hdr_key key;
176 bool found = false;
177 u32 hash_key;
178
179 num_actions = parse_attr->num_mod_hdr_actions;
180 actions_size = MLX5_MH_ACT_SZ * num_actions;
181
182 key.actions = parse_attr->mod_hdr_actions;
183 key.num_actions = num_actions;
184
185 hash_key = hash_mod_hdr_info(&key);
186
187 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
188 namespace = MLX5_FLOW_NAMESPACE_FDB;
189 hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
190 mod_hdr_hlist, hash_key) {
191 if (!cmp_mod_hdr_info(&mh->key, &key)) {
192 found = true;
193 break;
194 }
195 }
196 } else {
197 namespace = MLX5_FLOW_NAMESPACE_KERNEL;
198 hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
199 mod_hdr_hlist, hash_key) {
200 if (!cmp_mod_hdr_info(&mh->key, &key)) {
201 found = true;
202 break;
203 }
204 }
205 }
206
207 if (found)
208 goto attach_flow;
209
210 mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
211 if (!mh)
212 return -ENOMEM;
213
214 mh->key.actions = (void *)mh + sizeof(*mh);
215 memcpy(mh->key.actions, key.actions, actions_size);
216 mh->key.num_actions = num_actions;
217 INIT_LIST_HEAD(&mh->flows);
218
219 err = mlx5_modify_header_alloc(priv->mdev, namespace,
220 mh->key.num_actions,
221 mh->key.actions,
222 &mh->mod_hdr_id);
223 if (err)
224 goto out_err;
225
226 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
227 hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
228 else
229 hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
230
231attach_flow:
232 list_add(&flow->mod_hdr, &mh->flows);
233 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
234 flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
235 else
236 flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
237
238 return 0;
239
240out_err:
241 kfree(mh);
242 return err;
243}
244
245static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
246 struct mlx5e_tc_flow *flow)
247{
248 struct list_head *next = flow->mod_hdr.next;
249
250 list_del(&flow->mod_hdr);
251
252 if (list_empty(next)) {
253 struct mlx5e_mod_hdr_entry *mh;
254
255 mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
256
257 mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
258 hash_del(&mh->mod_hdr_hlist);
259 kfree(mh);
260 }
261}
262
77ab67b7
OG
263static
264struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
265{
266 struct net_device *netdev;
267 struct mlx5e_priv *priv;
268
269 netdev = __dev_get_by_index(net, ifindex);
270 priv = netdev_priv(netdev);
271 return priv->mdev;
272}
273
274static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
275{
276 u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
277 void *tirc;
278 int err;
279
280 err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
281 if (err)
282 goto alloc_tdn_err;
283
284 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
285
286 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
ddae74ac 287 MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
77ab67b7
OG
288 MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
289
290 err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
291 if (err)
292 goto create_tir_err;
293
294 return 0;
295
296create_tir_err:
297 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
298alloc_tdn_err:
299 return err;
300}
301
302static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
303{
304 mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
305 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
306}
307
3f6d08d1
OG
308static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
309{
310 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
311 struct mlx5e_priv *priv = hp->func_priv;
312 int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
313
314 mlx5e_build_default_indir_rqt(indirection_rqt, sz,
315 hp->num_channels);
316
317 for (i = 0; i < sz; i++) {
318 ix = i;
bbeb53b8 319 if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
3f6d08d1
OG
320 ix = mlx5e_bits_invert(i, ilog2(sz));
321 ix = indirection_rqt[ix];
322 rqn = hp->pair->rqn[ix];
323 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
324 }
325}
326
327static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
328{
329 int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
330 struct mlx5e_priv *priv = hp->func_priv;
331 struct mlx5_core_dev *mdev = priv->mdev;
332 void *rqtc;
333 u32 *in;
334
335 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
336 in = kvzalloc(inlen, GFP_KERNEL);
337 if (!in)
338 return -ENOMEM;
339
340 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
341
342 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
343 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
344
345 mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
346
347 err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
348 if (!err)
349 hp->indir_rqt.enabled = true;
350
351 kvfree(in);
352 return err;
353}
354
355static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
356{
357 struct mlx5e_priv *priv = hp->func_priv;
358 u32 in[MLX5_ST_SZ_DW(create_tir_in)];
359 int tt, i, err;
360 void *tirc;
361
362 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
d930ac79
AL
363 struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
364
3f6d08d1
OG
365 memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
366 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
367
368 MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
369 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
370 MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
bbeb53b8
AL
371 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
372
3f6d08d1
OG
373 err = mlx5_core_create_tir(hp->func_mdev, in,
374 MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
375 if (err) {
376 mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
377 goto err_destroy_tirs;
378 }
379 }
380 return 0;
381
382err_destroy_tirs:
383 for (i = 0; i < tt; i++)
384 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
385 return err;
386}
387
388static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
389{
390 int tt;
391
392 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
393 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
394}
395
396static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
397 struct ttc_params *ttc_params)
398{
399 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
400 int tt;
401
402 memset(ttc_params, 0, sizeof(*ttc_params));
403
404 ttc_params->any_tt_tirn = hp->tirn;
405
406 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
407 ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
408
409 ft_attr->max_fte = MLX5E_NUM_TT;
410 ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
411 ft_attr->prio = MLX5E_TC_PRIO;
412}
413
414static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
415{
416 struct mlx5e_priv *priv = hp->func_priv;
417 struct ttc_params ttc_params;
418 int err;
419
420 err = mlx5e_hairpin_create_indirect_rqt(hp);
421 if (err)
422 return err;
423
424 err = mlx5e_hairpin_create_indirect_tirs(hp);
425 if (err)
426 goto err_create_indirect_tirs;
427
428 mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
429 err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
430 if (err)
431 goto err_create_ttc_table;
432
433 netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
434 hp->num_channels, hp->ttc.ft.t->id);
435
436 return 0;
437
438err_create_ttc_table:
439 mlx5e_hairpin_destroy_indirect_tirs(hp);
440err_create_indirect_tirs:
441 mlx5e_destroy_rqt(priv, &hp->indir_rqt);
442
443 return err;
444}
445
446static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
447{
448 struct mlx5e_priv *priv = hp->func_priv;
449
450 mlx5e_destroy_ttc_table(priv, &hp->ttc);
451 mlx5e_hairpin_destroy_indirect_tirs(hp);
452 mlx5e_destroy_rqt(priv, &hp->indir_rqt);
453}
454
77ab67b7
OG
455static struct mlx5e_hairpin *
456mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
457 int peer_ifindex)
458{
459 struct mlx5_core_dev *func_mdev, *peer_mdev;
460 struct mlx5e_hairpin *hp;
461 struct mlx5_hairpin *pair;
462 int err;
463
464 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
465 if (!hp)
466 return ERR_PTR(-ENOMEM);
467
468 func_mdev = priv->mdev;
469 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
470
471 pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
472 if (IS_ERR(pair)) {
473 err = PTR_ERR(pair);
474 goto create_pair_err;
475 }
476 hp->pair = pair;
477 hp->func_mdev = func_mdev;
3f6d08d1
OG
478 hp->func_priv = priv;
479 hp->num_channels = params->num_channels;
77ab67b7
OG
480
481 err = mlx5e_hairpin_create_transport(hp);
482 if (err)
483 goto create_transport_err;
484
3f6d08d1
OG
485 if (hp->num_channels > 1) {
486 err = mlx5e_hairpin_rss_init(hp);
487 if (err)
488 goto rss_init_err;
489 }
490
77ab67b7
OG
491 return hp;
492
3f6d08d1
OG
493rss_init_err:
494 mlx5e_hairpin_destroy_transport(hp);
77ab67b7
OG
495create_transport_err:
496 mlx5_core_hairpin_destroy(hp->pair);
497create_pair_err:
498 kfree(hp);
499 return ERR_PTR(err);
500}
501
502static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
503{
3f6d08d1
OG
504 if (hp->num_channels > 1)
505 mlx5e_hairpin_rss_cleanup(hp);
77ab67b7
OG
506 mlx5e_hairpin_destroy_transport(hp);
507 mlx5_core_hairpin_destroy(hp->pair);
508 kvfree(hp);
509}
510
106be53b
OG
511static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
512{
513 return (peer_vhca_id << 16 | prio);
514}
515
5c65c564 516static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
106be53b 517 u16 peer_vhca_id, u8 prio)
5c65c564
OG
518{
519 struct mlx5e_hairpin_entry *hpe;
106be53b 520 u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
5c65c564
OG
521
522 hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
106be53b
OG
523 hairpin_hlist, hash_key) {
524 if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
5c65c564
OG
525 return hpe;
526 }
527
528 return NULL;
529}
530
106be53b
OG
531#define UNKNOWN_MATCH_PRIO 8
532
533static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
e98bedf5
EB
534 struct mlx5_flow_spec *spec, u8 *match_prio,
535 struct netlink_ext_ack *extack)
106be53b
OG
536{
537 void *headers_c, *headers_v;
538 u8 prio_val, prio_mask = 0;
539 bool vlan_present;
540
541#ifdef CONFIG_MLX5_CORE_EN_DCB
542 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
e98bedf5
EB
543 NL_SET_ERR_MSG_MOD(extack,
544 "only PCP trust state supported for hairpin");
106be53b
OG
545 return -EOPNOTSUPP;
546 }
547#endif
548 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
549 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
550
551 vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
552 if (vlan_present) {
553 prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
554 prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
555 }
556
557 if (!vlan_present || !prio_mask) {
558 prio_val = UNKNOWN_MATCH_PRIO;
559 } else if (prio_mask != 0x7) {
e98bedf5
EB
560 NL_SET_ERR_MSG_MOD(extack,
561 "masked priority match not supported for hairpin");
106be53b
OG
562 return -EOPNOTSUPP;
563 }
564
565 *match_prio = prio_val;
566 return 0;
567}
568
5c65c564
OG
569static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
570 struct mlx5e_tc_flow *flow,
e98bedf5
EB
571 struct mlx5e_tc_flow_parse_attr *parse_attr,
572 struct netlink_ext_ack *extack)
5c65c564
OG
573{
574 int peer_ifindex = parse_attr->mirred_ifindex;
575 struct mlx5_hairpin_params params;
d8822868 576 struct mlx5_core_dev *peer_mdev;
5c65c564
OG
577 struct mlx5e_hairpin_entry *hpe;
578 struct mlx5e_hairpin *hp;
3f6d08d1
OG
579 u64 link_speed64;
580 u32 link_speed;
106be53b 581 u8 match_prio;
d8822868 582 u16 peer_id;
5c65c564
OG
583 int err;
584
d8822868
OG
585 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
586 if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
e98bedf5 587 NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
5c65c564
OG
588 return -EOPNOTSUPP;
589 }
590
d8822868 591 peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
e98bedf5
EB
592 err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
593 extack);
106be53b
OG
594 if (err)
595 return err;
596 hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
5c65c564
OG
597 if (hpe)
598 goto attach_flow;
599
600 hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
601 if (!hpe)
602 return -ENOMEM;
603
604 INIT_LIST_HEAD(&hpe->flows);
d8822868 605 hpe->peer_vhca_id = peer_id;
106be53b 606 hpe->prio = match_prio;
5c65c564
OG
607
608 params.log_data_size = 15;
609 params.log_data_size = min_t(u8, params.log_data_size,
610 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
611 params.log_data_size = max_t(u8, params.log_data_size,
612 MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
5c65c564 613
eb9180f7
OG
614 params.log_num_packets = params.log_data_size -
615 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
616 params.log_num_packets = min_t(u8, params.log_num_packets,
617 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
618
619 params.q_counter = priv->q_counter;
3f6d08d1 620 /* set hairpin pair per each 50Gbs share of the link */
2c81bfd5 621 mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
3f6d08d1
OG
622 link_speed = max_t(u32, link_speed, 50000);
623 link_speed64 = link_speed;
624 do_div(link_speed64, 50000);
625 params.num_channels = link_speed64;
626
5c65c564
OG
627 hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
628 if (IS_ERR(hp)) {
629 err = PTR_ERR(hp);
630 goto create_hairpin_err;
631 }
632
eb9180f7 633 netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
ddae74ac 634 hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
eb9180f7 635 hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
5c65c564
OG
636
637 hpe->hp = hp;
106be53b
OG
638 hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
639 hash_hairpin_info(peer_id, match_prio));
5c65c564
OG
640
641attach_flow:
3f6d08d1
OG
642 if (hpe->hp->num_channels > 1) {
643 flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
644 flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
645 } else {
646 flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
647 }
5c65c564 648 list_add(&flow->hairpin, &hpe->flows);
3f6d08d1 649
5c65c564
OG
650 return 0;
651
652create_hairpin_err:
653 kfree(hpe);
654 return err;
655}
656
657static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
658 struct mlx5e_tc_flow *flow)
659{
660 struct list_head *next = flow->hairpin.next;
661
662 list_del(&flow->hairpin);
663
664 /* no more hairpin flows for us, release the hairpin pair */
665 if (list_empty(next)) {
666 struct mlx5e_hairpin_entry *hpe;
667
668 hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
669
670 netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
671 hpe->hp->pair->peer_mdev->priv.name);
672
673 mlx5e_hairpin_destroy(hpe->hp);
674 hash_del(&hpe->hairpin_hlist);
675 kfree(hpe);
676 }
677}
678
c83954ab 679static int
74491de9 680mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
17091853 681 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
682 struct mlx5e_tc_flow *flow,
683 struct netlink_ext_ack *extack)
e8f887ac 684{
aa0cbbae 685 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
aad7e08d 686 struct mlx5_core_dev *dev = priv->mdev;
5c65c564 687 struct mlx5_flow_destination dest[2] = {};
66958ed9 688 struct mlx5_flow_act flow_act = {
3bc4b7bf
OG
689 .action = attr->action,
690 .flow_tag = attr->flow_tag,
60786f09 691 .reformat_id = 0,
42f7ad67 692 .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
66958ed9 693 };
aad7e08d 694 struct mlx5_fc *counter = NULL;
e8f887ac 695 bool table_created = false;
5c65c564 696 int err, dest_ix = 0;
e8f887ac 697
3f6d08d1 698 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
e98bedf5 699 err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
3f6d08d1 700 if (err) {
3f6d08d1
OG
701 goto err_add_hairpin_flow;
702 }
703 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
704 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
705 dest[dest_ix].ft = attr->hairpin_ft;
706 } else {
5c65c564
OG
707 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
708 dest[dest_ix].tir_num = attr->hairpin_tirn;
5c65c564
OG
709 }
710 dest_ix++;
3f6d08d1
OG
711 } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
712 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
713 dest[dest_ix].ft = priv->fs.vlan.ft.t;
714 dest_ix++;
5c65c564 715 }
aad7e08d 716
5c65c564
OG
717 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
718 counter = mlx5_fc_create(dev, true);
719 if (IS_ERR(counter)) {
c83954ab 720 err = PTR_ERR(counter);
5c65c564
OG
721 goto err_fc_create;
722 }
723 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
171c7625 724 dest[dest_ix].counter_id = mlx5_fc_id(counter);
5c65c564 725 dest_ix++;
b8aee822 726 attr->counter = counter;
aad7e08d
AV
727 }
728
2f4fe4ca 729 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3099eb5a 730 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
d7e75a32 731 flow_act.modify_id = attr->mod_hdr_id;
2f4fe4ca 732 kfree(parse_attr->mod_hdr_actions);
c83954ab 733 if (err)
2f4fe4ca 734 goto err_create_mod_hdr_id;
2f4fe4ca
OG
735 }
736
acff797c 737 if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
21b9c144
OG
738 int tc_grp_size, tc_tbl_size;
739 u32 max_flow_counter;
740
741 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
742 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
743
744 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
745
746 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
747 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
748
acff797c
MG
749 priv->fs.tc.t =
750 mlx5_create_auto_grouped_flow_table(priv->fs.ns,
751 MLX5E_TC_PRIO,
21b9c144 752 tc_tbl_size,
acff797c 753 MLX5E_TC_TABLE_NUM_GROUPS,
3f6d08d1 754 MLX5E_TC_FT_LEVEL, 0);
acff797c 755 if (IS_ERR(priv->fs.tc.t)) {
e98bedf5
EB
756 NL_SET_ERR_MSG_MOD(extack,
757 "Failed to create tc offload table\n");
e8f887ac
AV
758 netdev_err(priv->netdev,
759 "Failed to create tc offload table\n");
c83954ab 760 err = PTR_ERR(priv->fs.tc.t);
aad7e08d 761 goto err_create_ft;
e8f887ac
AV
762 }
763
764 table_created = true;
765 }
766
38aa51c1
OG
767 if (attr->match_level != MLX5_MATCH_NONE)
768 parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
769
c83954ab
RL
770 flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
771 &flow_act, dest, dest_ix);
aad7e08d 772
c83954ab
RL
773 if (IS_ERR(flow->rule[0])) {
774 err = PTR_ERR(flow->rule[0]);
aad7e08d 775 goto err_add_rule;
c83954ab 776 }
aad7e08d 777
c83954ab 778 return 0;
e8f887ac 779
aad7e08d
AV
780err_add_rule:
781 if (table_created) {
acff797c
MG
782 mlx5_destroy_flow_table(priv->fs.tc.t);
783 priv->fs.tc.t = NULL;
e8f887ac 784 }
aad7e08d 785err_create_ft:
2f4fe4ca 786 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3099eb5a 787 mlx5e_detach_mod_hdr(priv, flow);
2f4fe4ca 788err_create_mod_hdr_id:
aad7e08d 789 mlx5_fc_destroy(dev, counter);
5c65c564
OG
790err_fc_create:
791 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
792 mlx5e_hairpin_flow_del(priv, flow);
793err_add_hairpin_flow:
c83954ab 794 return err;
e8f887ac
AV
795}
796
d85cdccb
OG
797static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
798 struct mlx5e_tc_flow *flow)
799{
513f8f7f 800 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
d85cdccb
OG
801 struct mlx5_fc *counter = NULL;
802
b8aee822 803 counter = attr->counter;
e4ad91f2 804 mlx5_del_flow_rules(flow->rule[0]);
aa0cbbae 805 mlx5_fc_destroy(priv->mdev, counter);
d85cdccb 806
b3a433de 807 if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
d85cdccb
OG
808 mlx5_destroy_flow_table(priv->fs.tc.t);
809 priv->fs.tc.t = NULL;
810 }
2f4fe4ca 811
513f8f7f 812 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3099eb5a 813 mlx5e_detach_mod_hdr(priv, flow);
5c65c564
OG
814
815 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
816 mlx5e_hairpin_flow_del(priv, flow);
d85cdccb
OG
817}
818
aa0cbbae
OG
819static void mlx5e_detach_encap(struct mlx5e_priv *priv,
820 struct mlx5e_tc_flow *flow);
821
3c37745e
OG
822static int mlx5e_attach_encap(struct mlx5e_priv *priv,
823 struct ip_tunnel_info *tun_info,
824 struct net_device *mirred_dev,
825 struct net_device **encap_dev,
e98bedf5
EB
826 struct mlx5e_tc_flow *flow,
827 struct netlink_ext_ack *extack);
3c37745e 828
6d2a3ed0
OG
829static struct mlx5_flow_handle *
830mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
831 struct mlx5e_tc_flow *flow,
832 struct mlx5_flow_spec *spec,
833 struct mlx5_esw_flow_attr *attr)
834{
835 struct mlx5_flow_handle *rule;
836
837 rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
838 if (IS_ERR(rule))
839 return rule;
840
e85e02ba 841 if (attr->split_count) {
6d2a3ed0
OG
842 flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
843 if (IS_ERR(flow->rule[1])) {
844 mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
845 return flow->rule[1];
846 }
847 }
848
849 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
850 return rule;
851}
852
853static void
854mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
855 struct mlx5e_tc_flow *flow,
856 struct mlx5_esw_flow_attr *attr)
857{
858 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
859
e85e02ba 860 if (attr->split_count)
6d2a3ed0
OG
861 mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
862
863 mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
864}
865
5dbe906f
PB
866static struct mlx5_flow_handle *
867mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
868 struct mlx5e_tc_flow *flow,
869 struct mlx5_flow_spec *spec,
870 struct mlx5_esw_flow_attr *slow_attr)
871{
872 struct mlx5_flow_handle *rule;
873
874 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
875 slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
e85e02ba 876 slow_attr->split_count = 0,
5dbe906f
PB
877 slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
878
879 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
880 if (!IS_ERR(rule))
881 flow->flags |= MLX5E_TC_FLOW_SLOW;
882
883 return rule;
884}
885
886static void
887mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
888 struct mlx5e_tc_flow *flow,
889 struct mlx5_esw_flow_attr *slow_attr)
890{
891 memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
892 mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
893 flow->flags &= ~MLX5E_TC_FLOW_SLOW;
894}
895
c83954ab 896static int
74491de9 897mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
17091853 898 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
899 struct mlx5e_tc_flow *flow,
900 struct netlink_ext_ack *extack)
adb4c123
OG
901{
902 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
bf07aa73 903 u32 max_chain = mlx5_eswitch_get_chain_range(esw);
aa0cbbae 904 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bf07aa73 905 u16 max_prio = mlx5_eswitch_get_prio_range(esw);
3c37745e 906 struct net_device *out_dev, *encap_dev = NULL;
b8aee822 907 struct mlx5_fc *counter = NULL;
3c37745e
OG
908 struct mlx5e_rep_priv *rpriv;
909 struct mlx5e_priv *out_priv;
c83954ab 910 int err = 0, encap_err = 0;
8b32580d 911
bf07aa73
PB
912 /* if prios are not supported, keep the old behaviour of using same prio
913 * for all offloaded rules.
914 */
915 if (!mlx5_eswitch_prios_supported(esw))
916 attr->prio = 1;
917
918 if (attr->chain > max_chain) {
919 NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
920 err = -EOPNOTSUPP;
921 goto err_max_prio_chain;
922 }
923
924 if (attr->prio > max_prio) {
925 NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
926 err = -EOPNOTSUPP;
927 goto err_max_prio_chain;
928 }
e52c2802 929
60786f09 930 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
3c37745e
OG
931 out_dev = __dev_get_by_index(dev_net(priv->netdev),
932 attr->parse_attr->mirred_ifindex);
c83954ab
RL
933 encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
934 out_dev, &encap_dev, flow,
935 extack);
936 if (encap_err && encap_err != -EAGAIN) {
937 err = encap_err;
938 goto err_attach_encap;
3c37745e
OG
939 }
940 out_priv = netdev_priv(encap_dev);
941 rpriv = out_priv->ppriv;
592d3651
CM
942 attr->out_rep[attr->out_count] = rpriv->rep;
943 attr->out_mdev[attr->out_count++] = out_priv->mdev;
3c37745e
OG
944 }
945
8b32580d 946 err = mlx5_eswitch_add_vlan_action(esw, attr);
c83954ab 947 if (err)
aa0cbbae 948 goto err_add_vlan;
adb4c123 949
d7e75a32 950 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1a9527bb 951 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
d7e75a32 952 kfree(parse_attr->mod_hdr_actions);
c83954ab 953 if (err)
d7e75a32 954 goto err_mod_hdr;
d7e75a32
OG
955 }
956
b8aee822
MB
957 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
958 counter = mlx5_fc_create(esw->dev, true);
959 if (IS_ERR(counter)) {
c83954ab 960 err = PTR_ERR(counter);
b8aee822
MB
961 goto err_create_counter;
962 }
963
964 attr->counter = counter;
965 }
966
c83954ab 967 /* we get here if (1) there's no error or when
3c37745e
OG
968 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
969 */
5dbe906f
PB
970 if (encap_err == -EAGAIN) {
971 /* continue with goto slow path rule instead */
972 struct mlx5_esw_flow_attr slow_attr;
973
974 flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
975 } else {
6d2a3ed0 976 flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
3c37745e 977 }
c83954ab 978
5dbe906f
PB
979 if (IS_ERR(flow->rule[0])) {
980 err = PTR_ERR(flow->rule[0]);
981 goto err_add_rule;
982 }
983
984 return 0;
aa0cbbae
OG
985
986err_add_rule:
b8aee822
MB
987 mlx5_fc_destroy(esw->dev, counter);
988err_create_counter:
513f8f7f 989 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1a9527bb 990 mlx5e_detach_mod_hdr(priv, flow);
d7e75a32 991err_mod_hdr:
aa0cbbae
OG
992 mlx5_eswitch_del_vlan_action(esw, attr);
993err_add_vlan:
60786f09 994 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
aa0cbbae 995 mlx5e_detach_encap(priv, flow);
3c37745e 996err_attach_encap:
bf07aa73 997err_max_prio_chain:
c83954ab 998 return err;
aa0cbbae 999}
d85cdccb
OG
1000
1001static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1002 struct mlx5e_tc_flow *flow)
1003{
1004 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
d7e75a32 1005 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
5dbe906f 1006 struct mlx5_esw_flow_attr slow_attr;
d85cdccb 1007
5dbe906f
PB
1008 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
1009 if (flow->flags & MLX5E_TC_FLOW_SLOW)
1010 mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
1011 else
1012 mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1013 }
d85cdccb 1014
513f8f7f 1015 mlx5_eswitch_del_vlan_action(esw, attr);
d85cdccb 1016
60786f09 1017 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
d85cdccb 1018 mlx5e_detach_encap(priv, flow);
513f8f7f 1019 kvfree(attr->parse_attr);
232c0013 1020 }
d7e75a32 1021
513f8f7f 1022 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1a9527bb 1023 mlx5e_detach_mod_hdr(priv, flow);
b8aee822
MB
1024
1025 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1026 mlx5_fc_destroy(esw->dev, attr->counter);
d85cdccb
OG
1027}
1028
232c0013
HHZ
1029void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
1030 struct mlx5e_encap_entry *e)
1031{
3c37745e 1032 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5dbe906f 1033 struct mlx5_esw_flow_attr slow_attr, *esw_attr;
6d2a3ed0
OG
1034 struct mlx5_flow_handle *rule;
1035 struct mlx5_flow_spec *spec;
232c0013
HHZ
1036 struct mlx5e_tc_flow *flow;
1037 int err;
1038
54c177ca
OS
1039 err = mlx5_packet_reformat_alloc(priv->mdev,
1040 e->reformat_type,
60786f09 1041 e->encap_size, e->encap_header,
31ca3648 1042 MLX5_FLOW_NAMESPACE_FDB,
60786f09 1043 &e->encap_id);
232c0013
HHZ
1044 if (err) {
1045 mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
1046 err);
1047 return;
1048 }
1049 e->flags |= MLX5_ENCAP_ENTRY_VALID;
f6dfb4c3 1050 mlx5e_rep_queue_neigh_stats_work(priv);
232c0013
HHZ
1051
1052 list_for_each_entry(flow, &e->flows, encap) {
3c37745e
OG
1053 esw_attr = flow->esw_attr;
1054 esw_attr->encap_id = e->encap_id;
6d2a3ed0
OG
1055 spec = &esw_attr->parse_attr->spec;
1056
5dbe906f 1057 /* update from slow path rule to encap rule */
6d2a3ed0
OG
1058 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
1059 if (IS_ERR(rule)) {
1060 err = PTR_ERR(rule);
232c0013
HHZ
1061 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1062 err);
1063 continue;
1064 }
5dbe906f
PB
1065
1066 mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
1067 flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
6d2a3ed0 1068 flow->rule[0] = rule;
232c0013
HHZ
1069 }
1070}
1071
1072void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
1073 struct mlx5e_encap_entry *e)
1074{
3c37745e 1075 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5dbe906f
PB
1076 struct mlx5_esw_flow_attr slow_attr;
1077 struct mlx5_flow_handle *rule;
1078 struct mlx5_flow_spec *spec;
232c0013 1079 struct mlx5e_tc_flow *flow;
5dbe906f 1080 int err;
232c0013
HHZ
1081
1082 list_for_each_entry(flow, &e->flows, encap) {
5dbe906f
PB
1083 spec = &flow->esw_attr->parse_attr->spec;
1084
1085 /* update from encap rule to slow path rule */
1086 rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
1087
1088 if (IS_ERR(rule)) {
1089 err = PTR_ERR(rule);
1090 mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
1091 err);
1092 continue;
1093 }
1094
1095 mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
1096 flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
1097 flow->rule[0] = rule;
232c0013
HHZ
1098 }
1099
1100 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
1101 e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
60786f09 1102 mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
232c0013
HHZ
1103 }
1104}
1105
b8aee822
MB
1106static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1107{
1108 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1109 return flow->esw_attr->counter;
1110 else
1111 return flow->nic_attr->counter;
1112}
1113
f6dfb4c3
HHZ
1114void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1115{
1116 struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1117 u64 bytes, packets, lastuse = 0;
1118 struct mlx5e_tc_flow *flow;
1119 struct mlx5e_encap_entry *e;
1120 struct mlx5_fc *counter;
1121 struct neigh_table *tbl;
1122 bool neigh_used = false;
1123 struct neighbour *n;
1124
1125 if (m_neigh->family == AF_INET)
1126 tbl = &arp_tbl;
1127#if IS_ENABLED(CONFIG_IPV6)
1128 else if (m_neigh->family == AF_INET6)
423c9db2 1129 tbl = &nd_tbl;
f6dfb4c3
HHZ
1130#endif
1131 else
1132 return;
1133
1134 list_for_each_entry(e, &nhe->encap_list, encap_list) {
1135 if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
1136 continue;
1137 list_for_each_entry(flow, &e->flows, encap) {
1138 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
b8aee822 1139 counter = mlx5e_tc_get_counter(flow);
f6dfb4c3
HHZ
1140 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1141 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1142 neigh_used = true;
1143 break;
1144 }
1145 }
1146 }
e36d4810
RD
1147 if (neigh_used)
1148 break;
f6dfb4c3
HHZ
1149 }
1150
1151 if (neigh_used) {
1152 nhe->reported_lastuse = jiffies;
1153
1154 /* find the relevant neigh according to the cached device and
1155 * dst ip pair
1156 */
1157 n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
c7f7ba8d 1158 if (!n)
f6dfb4c3 1159 return;
f6dfb4c3
HHZ
1160
1161 neigh_event_send(n, NULL);
1162 neigh_release(n);
1163 }
1164}
1165
d85cdccb
OG
1166static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1167 struct mlx5e_tc_flow *flow)
1168{
5067b602
RD
1169 struct list_head *next = flow->encap.next;
1170
1171 list_del(&flow->encap);
1172 if (list_empty(next)) {
c1ae1152 1173 struct mlx5e_encap_entry *e;
5067b602 1174
c1ae1152 1175 e = list_entry(next, struct mlx5e_encap_entry, flows);
232c0013
HHZ
1176 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1177
1178 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
60786f09 1179 mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
232c0013 1180
cdc5a7f3 1181 hash_del_rcu(&e->encap_hlist);
232c0013 1182 kfree(e->encap_header);
5067b602
RD
1183 kfree(e);
1184 }
1185}
1186
e8f887ac 1187static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
961e8979 1188 struct mlx5e_tc_flow *flow)
e8f887ac 1189{
d85cdccb
OG
1190 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1191 mlx5e_tc_del_fdb_flow(priv, flow);
1192 else
1193 mlx5e_tc_del_nic_flow(priv, flow);
e8f887ac
AV
1194}
1195
bbd00f7e
HHZ
1196
1197static int parse_tunnel_attr(struct mlx5e_priv *priv,
1198 struct mlx5_flow_spec *spec,
54c177ca
OS
1199 struct tc_cls_flower_offload *f,
1200 struct net_device *filter_dev)
bbd00f7e 1201{
e98bedf5 1202 struct netlink_ext_ack *extack = f->common.extack;
bbd00f7e
HHZ
1203 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1204 outer_headers);
1205 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1206 outer_headers);
1207
2e72eb43
OG
1208 struct flow_dissector_key_control *enc_control =
1209 skb_flow_dissector_target(f->dissector,
1210 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1211 f->key);
54c177ca 1212 int err = 0;
2e72eb43 1213
101f4de9
OS
1214 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1215 headers_c, headers_v);
54c177ca
OS
1216 if (err) {
1217 NL_SET_ERR_MSG_MOD(extack,
1218 "failed to parse tunnel attributes");
101f4de9 1219 return err;
bbd00f7e
HHZ
1220 }
1221
2e72eb43 1222 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
bbd00f7e
HHZ
1223 struct flow_dissector_key_ipv4_addrs *key =
1224 skb_flow_dissector_target(f->dissector,
1225 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1226 f->key);
1227 struct flow_dissector_key_ipv4_addrs *mask =
1228 skb_flow_dissector_target(f->dissector,
1229 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1230 f->mask);
1231 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1232 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1233 ntohl(mask->src));
1234 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1235 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1236 ntohl(key->src));
1237
1238 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1239 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1240 ntohl(mask->dst));
1241 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1242 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1243 ntohl(key->dst));
bbd00f7e 1244
2e72eb43
OG
1245 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1246 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
19f44401
OG
1247 } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1248 struct flow_dissector_key_ipv6_addrs *key =
1249 skb_flow_dissector_target(f->dissector,
1250 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1251 f->key);
1252 struct flow_dissector_key_ipv6_addrs *mask =
1253 skb_flow_dissector_target(f->dissector,
1254 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1255 f->mask);
1256
1257 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1258 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1259 &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1260 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1261 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1262 &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1263
1264 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1265 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1266 &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1267 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1268 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1269 &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1270
1271 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1272 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
2e72eb43 1273 }
bbd00f7e 1274
bcef735c
OG
1275 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
1276 struct flow_dissector_key_ip *key =
1277 skb_flow_dissector_target(f->dissector,
1278 FLOW_DISSECTOR_KEY_ENC_IP,
1279 f->key);
1280 struct flow_dissector_key_ip *mask =
1281 skb_flow_dissector_target(f->dissector,
1282 FLOW_DISSECTOR_KEY_ENC_IP,
1283 f->mask);
1284
1285 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1286 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1287
1288 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1289 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
1290
1291 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1292 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
e98bedf5
EB
1293
1294 if (mask->ttl &&
1295 !MLX5_CAP_ESW_FLOWTABLE_FDB
1296 (priv->mdev,
1297 ft_field_support.outer_ipv4_ttl)) {
1298 NL_SET_ERR_MSG_MOD(extack,
1299 "Matching on TTL is not supported");
1300 return -EOPNOTSUPP;
1301 }
1302
bcef735c
OG
1303 }
1304
bbd00f7e
HHZ
1305 /* Enforce DMAC when offloading incoming tunneled flows.
1306 * Flow counters require a match on the DMAC.
1307 */
1308 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1309 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1310 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1311 dmac_47_16), priv->netdev->dev_addr);
1312
1313 /* let software handle IP fragments */
1314 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1315 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
1316
1317 return 0;
1318}
1319
de0af0bf
RD
1320static int __parse_cls_flower(struct mlx5e_priv *priv,
1321 struct mlx5_flow_spec *spec,
1322 struct tc_cls_flower_offload *f,
54c177ca 1323 struct net_device *filter_dev,
d708f902 1324 u8 *match_level)
e3a2b7ed 1325{
e98bedf5 1326 struct netlink_ext_ack *extack = f->common.extack;
c5bb1730
MG
1327 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1328 outer_headers);
1329 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1330 outer_headers);
699e96dd
JL
1331 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1332 misc_parameters);
1333 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1334 misc_parameters);
e3a2b7ed
AV
1335 u16 addr_type = 0;
1336 u8 ip_proto = 0;
1337
d708f902 1338 *match_level = MLX5_MATCH_NONE;
de0af0bf 1339
e3a2b7ed
AV
1340 if (f->dissector->used_keys &
1341 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1342 BIT(FLOW_DISSECTOR_KEY_BASIC) |
1343 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
095b6cfd 1344 BIT(FLOW_DISSECTOR_KEY_VLAN) |
699e96dd 1345 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
e3a2b7ed
AV
1346 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1347 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
bbd00f7e
HHZ
1348 BIT(FLOW_DISSECTOR_KEY_PORTS) |
1349 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1350 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1351 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1352 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
e77834ec 1353 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
fd7da28b 1354 BIT(FLOW_DISSECTOR_KEY_TCP) |
bcef735c
OG
1355 BIT(FLOW_DISSECTOR_KEY_IP) |
1356 BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
e98bedf5 1357 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
e3a2b7ed
AV
1358 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
1359 f->dissector->used_keys);
1360 return -EOPNOTSUPP;
1361 }
1362
bbd00f7e
HHZ
1363 if ((dissector_uses_key(f->dissector,
1364 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
1365 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
1366 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
1367 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
1368 struct flow_dissector_key_control *key =
1369 skb_flow_dissector_target(f->dissector,
1370 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1371 f->key);
1372 switch (key->addr_type) {
1373 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
19f44401 1374 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
54c177ca 1375 if (parse_tunnel_attr(priv, spec, f, filter_dev))
bbd00f7e
HHZ
1376 return -EOPNOTSUPP;
1377 break;
1378 default:
1379 return -EOPNOTSUPP;
1380 }
1381
1382 /* In decap flow, header pointers should point to the inner
1383 * headers, outer header were already set by parse_tunnel_attr
1384 */
1385 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1386 inner_headers);
1387 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1388 inner_headers);
1389 }
1390
d3a80bb5
OG
1391 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1392 struct flow_dissector_key_basic *key =
e3a2b7ed 1393 skb_flow_dissector_target(f->dissector,
d3a80bb5 1394 FLOW_DISSECTOR_KEY_BASIC,
e3a2b7ed 1395 f->key);
d3a80bb5 1396 struct flow_dissector_key_basic *mask =
e3a2b7ed 1397 skb_flow_dissector_target(f->dissector,
d3a80bb5 1398 FLOW_DISSECTOR_KEY_BASIC,
e3a2b7ed 1399 f->mask);
d3a80bb5
OG
1400 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1401 ntohs(mask->n_proto));
1402 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1403 ntohs(key->n_proto));
e3a2b7ed 1404
d3a80bb5 1405 if (mask->n_proto)
d708f902 1406 *match_level = MLX5_MATCH_L2;
e3a2b7ed
AV
1407 }
1408
095b6cfd
OG
1409 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
1410 struct flow_dissector_key_vlan *key =
1411 skb_flow_dissector_target(f->dissector,
1412 FLOW_DISSECTOR_KEY_VLAN,
1413 f->key);
1414 struct flow_dissector_key_vlan *mask =
1415 skb_flow_dissector_target(f->dissector,
1416 FLOW_DISSECTOR_KEY_VLAN,
1417 f->mask);
699e96dd
JL
1418 if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1419 if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1420 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1421 svlan_tag, 1);
1422 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1423 svlan_tag, 1);
1424 } else {
1425 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1426 cvlan_tag, 1);
1427 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1428 cvlan_tag, 1);
1429 }
095b6cfd
OG
1430
1431 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
1432 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
358d79a4
OG
1433
1434 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
1435 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
54782900 1436
d708f902 1437 *match_level = MLX5_MATCH_L2;
54782900 1438 }
d3a80bb5 1439 } else if (*match_level != MLX5_MATCH_NONE) {
cee26487
JL
1440 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1441 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
d3a80bb5 1442 *match_level = MLX5_MATCH_L2;
54782900
OG
1443 }
1444
699e96dd
JL
1445 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
1446 struct flow_dissector_key_vlan *key =
1447 skb_flow_dissector_target(f->dissector,
1448 FLOW_DISSECTOR_KEY_CVLAN,
1449 f->key);
1450 struct flow_dissector_key_vlan *mask =
1451 skb_flow_dissector_target(f->dissector,
1452 FLOW_DISSECTOR_KEY_CVLAN,
1453 f->mask);
1454 if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1455 if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1456 MLX5_SET(fte_match_set_misc, misc_c,
1457 outer_second_svlan_tag, 1);
1458 MLX5_SET(fte_match_set_misc, misc_v,
1459 outer_second_svlan_tag, 1);
1460 } else {
1461 MLX5_SET(fte_match_set_misc, misc_c,
1462 outer_second_cvlan_tag, 1);
1463 MLX5_SET(fte_match_set_misc, misc_v,
1464 outer_second_cvlan_tag, 1);
1465 }
1466
1467 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
1468 mask->vlan_id);
1469 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
1470 key->vlan_id);
1471 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
1472 mask->vlan_priority);
1473 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
1474 key->vlan_priority);
1475
1476 *match_level = MLX5_MATCH_L2;
1477 }
1478 }
1479
d3a80bb5
OG
1480 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1481 struct flow_dissector_key_eth_addrs *key =
54782900 1482 skb_flow_dissector_target(f->dissector,
d3a80bb5 1483 FLOW_DISSECTOR_KEY_ETH_ADDRS,
54782900 1484 f->key);
d3a80bb5 1485 struct flow_dissector_key_eth_addrs *mask =
54782900 1486 skb_flow_dissector_target(f->dissector,
d3a80bb5 1487 FLOW_DISSECTOR_KEY_ETH_ADDRS,
54782900 1488 f->mask);
54782900 1489
d3a80bb5
OG
1490 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1491 dmac_47_16),
1492 mask->dst);
1493 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1494 dmac_47_16),
1495 key->dst);
1496
1497 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1498 smac_47_16),
1499 mask->src);
1500 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1501 smac_47_16),
1502 key->src);
1503
1504 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
d708f902 1505 *match_level = MLX5_MATCH_L2;
54782900
OG
1506 }
1507
1508 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
1509 struct flow_dissector_key_control *key =
1510 skb_flow_dissector_target(f->dissector,
1511 FLOW_DISSECTOR_KEY_CONTROL,
1512 f->key);
1513
1514 struct flow_dissector_key_control *mask =
1515 skb_flow_dissector_target(f->dissector,
1516 FLOW_DISSECTOR_KEY_CONTROL,
1517 f->mask);
1518 addr_type = key->addr_type;
1519
1520 /* the HW doesn't support frag first/later */
1521 if (mask->flags & FLOW_DIS_FIRST_FRAG)
1522 return -EOPNOTSUPP;
1523
1524 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
1525 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1526 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
1527 key->flags & FLOW_DIS_IS_FRAGMENT);
1528
1529 /* the HW doesn't need L3 inline to match on frag=no */
1530 if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
83621b7d 1531 *match_level = MLX5_MATCH_L2;
54782900
OG
1532 /* *** L2 attributes parsing up to here *** */
1533 else
83621b7d 1534 *match_level = MLX5_MATCH_L3;
095b6cfd
OG
1535 }
1536 }
1537
54782900
OG
1538 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1539 struct flow_dissector_key_basic *key =
1540 skb_flow_dissector_target(f->dissector,
1541 FLOW_DISSECTOR_KEY_BASIC,
1542 f->key);
1543 struct flow_dissector_key_basic *mask =
1544 skb_flow_dissector_target(f->dissector,
1545 FLOW_DISSECTOR_KEY_BASIC,
1546 f->mask);
1547 ip_proto = key->ip_proto;
1548
1549 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
1550 mask->ip_proto);
1551 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1552 key->ip_proto);
1553
1554 if (mask->ip_proto)
d708f902 1555 *match_level = MLX5_MATCH_L3;
54782900
OG
1556 }
1557
e3a2b7ed
AV
1558 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1559 struct flow_dissector_key_ipv4_addrs *key =
1560 skb_flow_dissector_target(f->dissector,
1561 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1562 f->key);
1563 struct flow_dissector_key_ipv4_addrs *mask =
1564 skb_flow_dissector_target(f->dissector,
1565 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1566 f->mask);
1567
1568 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1569 src_ipv4_src_ipv6.ipv4_layout.ipv4),
1570 &mask->src, sizeof(mask->src));
1571 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1572 src_ipv4_src_ipv6.ipv4_layout.ipv4),
1573 &key->src, sizeof(key->src));
1574 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1575 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1576 &mask->dst, sizeof(mask->dst));
1577 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1578 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1579 &key->dst, sizeof(key->dst));
de0af0bf
RD
1580
1581 if (mask->src || mask->dst)
d708f902 1582 *match_level = MLX5_MATCH_L3;
e3a2b7ed
AV
1583 }
1584
1585 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1586 struct flow_dissector_key_ipv6_addrs *key =
1587 skb_flow_dissector_target(f->dissector,
1588 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1589 f->key);
1590 struct flow_dissector_key_ipv6_addrs *mask =
1591 skb_flow_dissector_target(f->dissector,
1592 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1593 f->mask);
1594
1595 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1596 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1597 &mask->src, sizeof(mask->src));
1598 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1599 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1600 &key->src, sizeof(key->src));
1601
1602 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1603 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1604 &mask->dst, sizeof(mask->dst));
1605 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1606 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1607 &key->dst, sizeof(key->dst));
de0af0bf
RD
1608
1609 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
1610 ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
d708f902 1611 *match_level = MLX5_MATCH_L3;
e3a2b7ed
AV
1612 }
1613
1f97a526
OG
1614 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
1615 struct flow_dissector_key_ip *key =
1616 skb_flow_dissector_target(f->dissector,
1617 FLOW_DISSECTOR_KEY_IP,
1618 f->key);
1619 struct flow_dissector_key_ip *mask =
1620 skb_flow_dissector_target(f->dissector,
1621 FLOW_DISSECTOR_KEY_IP,
1622 f->mask);
1623
1624 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1625 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1626
1627 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1628 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
1629
a8ade55f
OG
1630 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1631 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1f97a526 1632
a8ade55f
OG
1633 if (mask->ttl &&
1634 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
e98bedf5
EB
1635 ft_field_support.outer_ipv4_ttl)) {
1636 NL_SET_ERR_MSG_MOD(extack,
1637 "Matching on TTL is not supported");
1f97a526 1638 return -EOPNOTSUPP;
e98bedf5 1639 }
a8ade55f
OG
1640
1641 if (mask->tos || mask->ttl)
d708f902 1642 *match_level = MLX5_MATCH_L3;
1f97a526
OG
1643 }
1644
54782900
OG
1645 /* *** L3 attributes parsing up to here *** */
1646
e3a2b7ed
AV
1647 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
1648 struct flow_dissector_key_ports *key =
1649 skb_flow_dissector_target(f->dissector,
1650 FLOW_DISSECTOR_KEY_PORTS,
1651 f->key);
1652 struct flow_dissector_key_ports *mask =
1653 skb_flow_dissector_target(f->dissector,
1654 FLOW_DISSECTOR_KEY_PORTS,
1655 f->mask);
1656 switch (ip_proto) {
1657 case IPPROTO_TCP:
1658 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1659 tcp_sport, ntohs(mask->src));
1660 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1661 tcp_sport, ntohs(key->src));
1662
1663 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1664 tcp_dport, ntohs(mask->dst));
1665 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1666 tcp_dport, ntohs(key->dst));
1667 break;
1668
1669 case IPPROTO_UDP:
1670 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1671 udp_sport, ntohs(mask->src));
1672 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1673 udp_sport, ntohs(key->src));
1674
1675 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1676 udp_dport, ntohs(mask->dst));
1677 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1678 udp_dport, ntohs(key->dst));
1679 break;
1680 default:
e98bedf5
EB
1681 NL_SET_ERR_MSG_MOD(extack,
1682 "Only UDP and TCP transports are supported for L4 matching");
e3a2b7ed
AV
1683 netdev_err(priv->netdev,
1684 "Only UDP and TCP transport are supported\n");
1685 return -EINVAL;
1686 }
de0af0bf
RD
1687
1688 if (mask->src || mask->dst)
d708f902 1689 *match_level = MLX5_MATCH_L4;
e3a2b7ed
AV
1690 }
1691
e77834ec
OG
1692 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1693 struct flow_dissector_key_tcp *key =
1694 skb_flow_dissector_target(f->dissector,
1695 FLOW_DISSECTOR_KEY_TCP,
1696 f->key);
1697 struct flow_dissector_key_tcp *mask =
1698 skb_flow_dissector_target(f->dissector,
1699 FLOW_DISSECTOR_KEY_TCP,
1700 f->mask);
1701
1702 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1703 ntohs(mask->flags));
1704 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1705 ntohs(key->flags));
1706
1707 if (mask->flags)
d708f902 1708 *match_level = MLX5_MATCH_L4;
e77834ec
OG
1709 }
1710
e3a2b7ed
AV
1711 return 0;
1712}
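
/* Editor's sketch (not part of this driver): every dissector key parsed
 * above follows the same pattern - the mask goes into the match criteria
 * (headers_c) and the value into the match value (headers_v), so hardware
 * compares only the masked bits. A minimal userspace analogue for one
 * 16-bit port field; all names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_port_match {
	uint16_t mask;	/* headers_c analogue */
	uint16_t value;	/* headers_v analogue */
};

static bool example_port_matches(const struct example_port_match *m,
				 uint16_t port)
{
	/* unmasked bits are "don't care", exactly as in the FTE match */
	return (port & m->mask) == (m->value & m->mask);
}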
1713
de0af0bf 1714static int parse_cls_flower(struct mlx5e_priv *priv,
65ba8fb7 1715 struct mlx5e_tc_flow *flow,
de0af0bf 1716 struct mlx5_flow_spec *spec,
54c177ca
OS
1717 struct tc_cls_flower_offload *f,
1718 struct net_device *filter_dev)
de0af0bf 1719{
e98bedf5 1720 struct netlink_ext_ack *extack = f->common.extack;
de0af0bf
RD
1721 struct mlx5_core_dev *dev = priv->mdev;
1722 struct mlx5_eswitch *esw = dev->priv.eswitch;
1d447a39
SM
1723 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1724 struct mlx5_eswitch_rep *rep;
d708f902 1725 u8 match_level;
de0af0bf
RD
1726 int err;
1727
54c177ca 1728 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
de0af0bf 1729
1d447a39
SM
1730 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1731 rep = rpriv->rep;
1732 if (rep->vport != FDB_UPLINK_VPORT &&
1733 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
d708f902 1734 esw->offloads.inline_mode < match_level)) {
e98bedf5
EB
1735 NL_SET_ERR_MSG_MOD(extack,
1736 "Flow is not offloaded due to min inline setting");
de0af0bf
RD
1737 netdev_warn(priv->netdev,
1738 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
d708f902 1739 match_level, esw->offloads.inline_mode);
de0af0bf
RD
1740 return -EOPNOTSUPP;
1741 }
1742 }
1743
38aa51c1
OG
1744 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1745 flow->esw_attr->match_level = match_level;
1746 else
1747 flow->nic_attr->match_level = match_level;
1748
de0af0bf
RD
1749 return err;
1750}
1751
d79b6df6
OG
1752struct pedit_headers {
1753 struct ethhdr eth;
1754 struct iphdr ip4;
1755 struct ipv6hdr ip6;
1756 struct tcphdr tcp;
1757 struct udphdr udp;
1758};
1759
1760static int pedit_header_offsets[] = {
1761 [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1762 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1763 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1764 [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1765 [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1766};
1767
1768#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1769
1770static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1771 struct pedit_headers *masks,
1772 struct pedit_headers *vals)
1773{
1774 u32 *curr_pmask, *curr_pval;
1775
1776 if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1777 goto out_err;
1778
1779 curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1780 curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
1781
1782 if (*curr_pmask & mask) /* disallow acting twice on the same location */
1783 goto out_err;
1784
1785 *curr_pmask |= mask;
1786 *curr_pval |= (val & mask);
1787
1788 return 0;
1789
1790out_err:
1791 return -EOPNOTSUPP;
1792}
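
/* Editor's sketch of the "acting twice" rule set_pedit_val() enforces:
 * each field accumulates a mask, and a later key touching bits that are
 * already claimed is rejected. Userspace analogue, hypothetical name.
 */
#include <errno.h>
#include <stdint.h>

static int example_claim_bits(uint32_t *claimed, uint32_t mask)
{
	if (*claimed & mask)	/* some bit already written by an earlier key */
		return -EOPNOTSUPP;
	*claimed |= mask;
	return 0;
}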
1793
1794struct mlx5_fields {
1795 u8 field;
1796 u8 size;
1797 u32 offset;
1798};
1799
a8e4f0c4
OG
1800#define OFFLOAD(fw_field, size, field, off) \
1801 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1802
d79b6df6 1803static struct mlx5_fields fields[] = {
a8e4f0c4
OG
1804 OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1805 OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
1806 OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1807 OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
1808 OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
1809
1810 OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
1811 OFFLOAD(SIPV4, 4, ip4.saddr, 0),
1812 OFFLOAD(DIPV4, 4, ip4.daddr, 0),
1813
1814 OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1815 OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
1816 OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
1817 OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
1818 OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1819 OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
1820 OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
1821 OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
0c0316f5 1822 OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
a8e4f0c4
OG
1823
1824 OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
1825 OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
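	/* the TCP flags octet sits 5 bytes past ack_seq (byte 13 of the
	 * header, assuming the standard struct tcphdr layout), hence the
	 * non-zero offset argument below
	 */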
1826 OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1827
1828 OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1829 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
d79b6df6
OG
1830};
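
/* Editor's sketch (not from the driver): the OFFLOAD() table above is the
 * classic offsetof()-driven field map - one generic loop can then read or
 * clear any rewritable field. Minimal userspace analogue, hypothetical names.
 */
#include <stddef.h>
#include <string.h>

struct example_hdrs {
	unsigned char h_dest[6];
	unsigned char h_source[6];
};

struct example_field {
	int fw_field;	/* hardware field id */
	size_t size;	/* bytes to copy */
	size_t offset;	/* byte offset inside struct example_hdrs */
};

static const struct example_field example_fields[] = {
	{ 1, 6, offsetof(struct example_hdrs, h_dest) },
	{ 2, 6, offsetof(struct example_hdrs, h_source) },
};

static void example_read_field(const struct example_hdrs *hdrs,
			       const struct example_field *f, void *out)
{
	memcpy(out, (const char *)hdrs + f->offset, f->size);
}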
1831
1832/* On input, parse_attr->num_mod_hdr_actions holds the max number of HW actions
1833 * that can be generated from the SW pedit keys. On success, it holds the number
1834 * actually generated.
1835 */
1836static int offload_pedit_fields(struct pedit_headers *masks,
1837 struct pedit_headers *vals,
e98bedf5
EB
1838 struct mlx5e_tc_flow_parse_attr *parse_attr,
1839 struct netlink_ext_ack *extack)
d79b6df6
OG
1840{
1841 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2b64beba 1842 int i, action_size, nactions, max_actions, first, last, next_z;
d79b6df6 1843 void *s_masks_p, *a_masks_p, *vals_p;
d79b6df6
OG
1844 struct mlx5_fields *f;
1845 u8 cmd, field_bsize;
e3ca4e05 1846 u32 s_mask, a_mask;
d79b6df6 1847 unsigned long mask;
2b64beba
OG
1848 __be32 mask_be32;
1849 __be16 mask_be16;
d79b6df6
OG
1850 void *action;
1851
1852 set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1853 add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1854 set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1855 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1856
1857 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1858 action = parse_attr->mod_hdr_actions;
1859 max_actions = parse_attr->num_mod_hdr_actions;
1860 nactions = 0;
1861
1862 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1863 f = &fields[i];
1864 /* avoid seeing bits set from previous iterations */
e3ca4e05
OG
1865 s_mask = 0;
1866 a_mask = 0;
d79b6df6
OG
1867
1868 s_masks_p = (void *)set_masks + f->offset;
1869 a_masks_p = (void *)add_masks + f->offset;
1870
1871 memcpy(&s_mask, s_masks_p, f->size);
1872 memcpy(&a_mask, a_masks_p, f->size);
1873
1874 if (!s_mask && !a_mask) /* nothing to offload here */
1875 continue;
1876
1877 if (s_mask && a_mask) {
e98bedf5
EB
1878 NL_SET_ERR_MSG_MOD(extack,
1879 "can't set and add to the same HW field");
d79b6df6
OG
1880 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1881 return -EOPNOTSUPP;
1882 }
1883
1884 if (nactions == max_actions) {
e98bedf5
EB
1885 NL_SET_ERR_MSG_MOD(extack,
1886 "too many pedit actions, can't offload");
d79b6df6
OG
1887 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1888 return -EOPNOTSUPP;
1889 }
1890
1891 if (s_mask) {
1892 cmd = MLX5_ACTION_TYPE_SET;
1893 mask = s_mask;
1894 vals_p = (void *)set_vals + f->offset;
1895 /* clear to denote we consumed this field */
1896 memset(s_masks_p, 0, f->size);
1897 } else {
1898 cmd = MLX5_ACTION_TYPE_ADD;
1899 mask = a_mask;
1900 vals_p = (void *)add_vals + f->offset;
1901 /* clear to denote we consumed this field */
1902 memset(a_masks_p, 0, f->size);
1903 }
1904
d79b6df6 1905 field_bsize = f->size * BITS_PER_BYTE;
e3ca4e05 1906
2b64beba
OG
1907 if (field_bsize == 32) {
1908 mask_be32 = *(__be32 *)&mask;
1909 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1910 } else if (field_bsize == 16) {
1911 mask_be16 = *(__be16 *)&mask;
1912 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1913 }
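		/* note (editor's comment): the pedit mask arrives in network
		 * byte order; swapping it into a host unsigned long here lets
		 * the find_*_bit() calls below count bits from the field's
		 * least-significant end regardless of host endianness
		 */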
1914
d79b6df6 1915 first = find_first_bit(&mask, field_bsize);
2b64beba 1916 next_z = find_next_zero_bit(&mask, field_bsize, first);
d79b6df6 1917 last = find_last_bit(&mask, field_bsize);
2b64beba 1918 if (first < next_z && next_z < last) {
e98bedf5
EB
1919 NL_SET_ERR_MSG_MOD(extack,
1920 "rewrite of few sub-fields isn't supported");
2b64beba 1921 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
d79b6df6
OG
1922 mask);
1923 return -EOPNOTSUPP;
1924 }
1925
1926 MLX5_SET(set_action_in, action, action_type, cmd);
1927 MLX5_SET(set_action_in, action, field, f->field);
1928
1929 if (cmd == MLX5_ACTION_TYPE_SET) {
2b64beba 1930 MLX5_SET(set_action_in, action, offset, first);
d79b6df6 1931 /* length is num of bits to be written, zero means length of 32 */
2b64beba 1932 MLX5_SET(set_action_in, action, length, (last - first + 1));
d79b6df6
OG
1933 }
1934
1935 if (field_bsize == 32)
2b64beba 1936 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
d79b6df6 1937 else if (field_bsize == 16)
2b64beba 1938 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
d79b6df6 1939 else if (field_bsize == 8)
2b64beba 1940 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
d79b6df6
OG
1941
1942 action += action_size;
1943 nactions++;
1944 }
1945
1946 parse_attr->num_mod_hdr_actions = nactions;
1947 return 0;
1948}
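
/* Editor's sketch of the first/next_z/last test above: a rewrite is
 * offloadable only when the mask's set bits form one contiguous run. A
 * userspace analogue using bit tricks instead of the kernel's find_*_bit
 * helpers (assumption: plain C, no kernel headers).
 */
#include <stdbool.h>

static bool example_mask_is_one_run(unsigned long mask)
{
	unsigned long normalized;

	if (!mask)
		return false;
	normalized = mask / (mask & -mask);	/* shift the run down to bit 0 */
	return (normalized & (normalized + 1)) == 0;	/* all-ones => one run */
}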
1949
1950static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1951 const struct tc_action *a, int namespace,
1952 struct mlx5e_tc_flow_parse_attr *parse_attr)
1953{
1954 int nkeys, action_size, max_actions;
1955
1956 nkeys = tcf_pedit_nkeys(a);
1957 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1958
1959 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1960 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1961 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1962 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
1963
 1964 /* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
1965 max_actions = min(max_actions, nkeys * 16);
1966
1967 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1968 if (!parse_attr->mod_hdr_actions)
1969 return -ENOMEM;
1970
1971 parse_attr->num_mod_hdr_actions = max_actions;
1972 return 0;
1973}
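
/* Editor's sketch of the sizing rule above: the allocation is bounded both
 * by the device's max_modify_header_actions capability and by nkeys * 16,
 * the worst-case expansion of the SW keys into HW actions. Hypothetical
 * userspace analogue.
 */
static int example_max_actions(int nkeys, int hw_limit)
{
	int sw_worst_case = nkeys * 16;

	return hw_limit < sw_worst_case ? hw_limit : sw_worst_case;
}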
1974
1975static const struct pedit_headers zero_masks = {};
1976
1977static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1978 const struct tc_action *a, int namespace,
e98bedf5
EB
1979 struct mlx5e_tc_flow_parse_attr *parse_attr,
1980 struct netlink_ext_ack *extack)
d79b6df6
OG
1981{
1982 struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1983 int nkeys, i, err = -EOPNOTSUPP;
1984 u32 mask, val, offset;
1985 u8 cmd, htype;
1986
1987 nkeys = tcf_pedit_nkeys(a);
1988
1989 memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1990 memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1991
1992 for (i = 0; i < nkeys; i++) {
1993 htype = tcf_pedit_htype(a, i);
1994 cmd = tcf_pedit_cmd(a, i);
1995 err = -EOPNOTSUPP; /* can't be all optimistic */
1996
1997 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
e98bedf5
EB
1998 NL_SET_ERR_MSG_MOD(extack,
1999 "legacy pedit isn't offloaded");
d79b6df6
OG
2000 goto out_err;
2001 }
2002
2003 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
e98bedf5 2004 NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
d79b6df6
OG
2005 goto out_err;
2006 }
2007
2008 mask = tcf_pedit_mask(a, i);
2009 val = tcf_pedit_val(a, i);
2010 offset = tcf_pedit_offset(a, i);
2011
2012 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
2013 if (err)
2014 goto out_err;
2015 }
2016
2017 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2018 if (err)
2019 goto out_err;
2020
e98bedf5 2021 err = offload_pedit_fields(masks, vals, parse_attr, extack);
d79b6df6
OG
2022 if (err < 0)
2023 goto out_dealloc_parsed_actions;
2024
2025 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2026 cmd_masks = &masks[cmd];
2027 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
e98bedf5
EB
2028 NL_SET_ERR_MSG_MOD(extack,
2029 "attempt to offload an unsupported field");
b3a433de 2030 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
d79b6df6
OG
2031 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2032 16, 1, cmd_masks, sizeof(zero_masks), true);
2033 err = -EOPNOTSUPP;
2034 goto out_dealloc_parsed_actions;
2035 }
2036 }
2037
2038 return 0;
2039
2040out_dealloc_parsed_actions:
2041 kfree(parse_attr->mod_hdr_actions);
2042out_err:
2043 return err;
2044}
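
/* Editor's sketch of the final leftover check above: after every supported
 * field has been consumed (its mask cleared), any residual non-zero mask
 * means the filter touches a field the OFFLOAD() table can't express, so
 * the whole flow is rejected rather than partially offloaded. Userspace
 * analogue, hypothetical name.
 */
#include <string.h>

static int example_fully_consumed(const void *residual_masks, size_t len)
{
	static const unsigned char zeros[256];	/* assumes len <= 256 */

	return len <= sizeof(zeros) &&
	       memcmp(residual_masks, zeros, len) == 0;
}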
2045
e98bedf5
EB
2046static bool csum_offload_supported(struct mlx5e_priv *priv,
2047 u32 action,
2048 u32 update_flags,
2049 struct netlink_ext_ack *extack)
26c02749
OG
2050{
2051 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2052 TCA_CSUM_UPDATE_FLAG_UDP;
2053
 2054 /* The HW recalculates checksums only when it rewrites headers */
2055 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
e98bedf5
EB
2056 NL_SET_ERR_MSG_MOD(extack,
2057 "TC csum action is only offloaded with pedit");
26c02749
OG
2058 netdev_warn(priv->netdev,
2059 "TC csum action is only offloaded with pedit\n");
2060 return false;
2061 }
2062
2063 if (update_flags & ~prot_flags) {
e98bedf5
EB
2064 NL_SET_ERR_MSG_MOD(extack,
2065 "can't offload TC csum action for some header/s");
26c02749
OG
2066 netdev_warn(priv->netdev,
2067 "can't offload TC csum action for some header/s - flags %#x\n",
2068 update_flags);
2069 return false;
2070 }
2071
2072 return true;
2073}
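
/* Editor's sketch: "are all requested checksum updates within the
 * supported set" reduces to the classic subset test used above.
 * Hypothetical name.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_flags_subset(uint32_t requested, uint32_t supported)
{
	return (requested & ~supported) == 0;
}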
2074
bdd66ac0 2075static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
e98bedf5
EB
2076 struct tcf_exts *exts,
2077 struct netlink_ext_ack *extack)
bdd66ac0
OG
2078{
2079 const struct tc_action *a;
2080 bool modify_ip_header;
2081 LIST_HEAD(actions);
2082 u8 htype, ip_proto;
2083 void *headers_v;
2084 u16 ethertype;
2085 int nkeys, i;
2086
2087 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2088 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2089
2090 /* for non-IP we only re-write MACs, so we're okay */
2091 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2092 goto out_ok;
2093
2094 modify_ip_header = false;
244cd96a
CW
2095 tcf_exts_for_each_action(i, a, exts) {
2096 int k;
2097
bdd66ac0
OG
2098 if (!is_tcf_pedit(a))
2099 continue;
2100
2101 nkeys = tcf_pedit_nkeys(a);
244cd96a
CW
2102 for (k = 0; k < nkeys; k++) {
2103 htype = tcf_pedit_htype(a, k);
bdd66ac0
OG
2104 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
2105 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
2106 modify_ip_header = true;
2107 break;
2108 }
2109 }
2110 }
2111
2112 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1ccef350
JL
2113 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
2114 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
e98bedf5
EB
2115 NL_SET_ERR_MSG_MOD(extack,
2116 "can't offload re-write of non TCP/UDP");
bdd66ac0
OG
2117 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2118 return false;
2119 }
2120
2121out_ok:
2122 return true;
2123}
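
/* Editor's sketch of the rule above: an IP-header rewrite is only
 * offloaded when the flow is known to carry TCP, UDP or ICMP. Userspace
 * analogue, hypothetical name.
 */
#include <netinet/in.h>
#include <stdbool.h>

static bool example_ip_rewrite_allowed(bool modifies_ip_header, int ip_proto)
{
	if (!modifies_ip_header)
		return true;
	return ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP ||
	       ip_proto == IPPROTO_ICMP;
}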
2124
2125static bool actions_match_supported(struct mlx5e_priv *priv,
2126 struct tcf_exts *exts,
2127 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2128 struct mlx5e_tc_flow *flow,
2129 struct netlink_ext_ack *extack)
bdd66ac0
OG
2130{
2131 u32 actions;
2132
2133 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
2134 actions = flow->esw_attr->action;
2135 else
2136 actions = flow->nic_attr->action;
2137
7e29392e
RD
2138 if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
2139 !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
2140 return false;
2141
bdd66ac0 2142 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
e98bedf5
EB
2143 return modify_header_match_supported(&parse_attr->spec, exts,
2144 extack);
bdd66ac0
OG
2145
2146 return true;
2147}
2148
5c65c564
OG
2149static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
2150{
2151 struct mlx5_core_dev *fmdev, *pmdev;
816f6706 2152 u64 fsystem_guid, psystem_guid;
5c65c564
OG
2153
2154 fmdev = priv->mdev;
2155 pmdev = peer_priv->mdev;
2156
59c9d35e
AH
2157 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
2158 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
5c65c564 2159
816f6706 2160 return (fsystem_guid == psystem_guid);
5c65c564
OG
2161}
2162
5c40348c 2163static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
aa0cbbae 2164 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2165 struct mlx5e_tc_flow *flow,
2166 struct netlink_ext_ack *extack)
e3a2b7ed 2167{
aa0cbbae 2168 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
e3a2b7ed 2169 const struct tc_action *a;
22dc13c8 2170 LIST_HEAD(actions);
1cab1cd7 2171 u32 action = 0;
244cd96a 2172 int err, i;
e3a2b7ed 2173
3bcc0cec 2174 if (!tcf_exts_has_actions(exts))
e3a2b7ed
AV
2175 return -EINVAL;
2176
3bc4b7bf 2177 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
e3a2b7ed 2178
244cd96a 2179 tcf_exts_for_each_action(i, a, exts) {
e3a2b7ed 2180 if (is_tcf_gact_shot(a)) {
1cab1cd7 2181 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
aad7e08d
AV
2182 if (MLX5_CAP_FLOWTABLE(priv->mdev,
2183 flow_table_properties_nic_receive.flow_counter))
1cab1cd7 2184 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
e3a2b7ed
AV
2185 continue;
2186 }
2187
2f4fe4ca
OG
2188 if (is_tcf_pedit(a)) {
2189 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
e98bedf5 2190 parse_attr, extack);
2f4fe4ca
OG
2191 if (err)
2192 return err;
2193
1cab1cd7
OG
2194 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
2195 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2f4fe4ca
OG
2196 continue;
2197 }
2198
26c02749 2199 if (is_tcf_csum(a)) {
1cab1cd7 2200 if (csum_offload_supported(priv, action,
e98bedf5
EB
2201 tcf_csum_update_flags(a),
2202 extack))
26c02749
OG
2203 continue;
2204
2205 return -EOPNOTSUPP;
2206 }
2207
5c65c564
OG
2208 if (is_tcf_mirred_egress_redirect(a)) {
2209 struct net_device *peer_dev = tcf_mirred_dev(a);
2210
2211 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
2212 same_hw_devs(priv, netdev_priv(peer_dev))) {
2213 parse_attr->mirred_ifindex = peer_dev->ifindex;
2214 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
1cab1cd7
OG
2215 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2216 MLX5_FLOW_CONTEXT_ACTION_COUNT;
5c65c564 2217 } else {
e98bedf5
EB
2218 NL_SET_ERR_MSG_MOD(extack,
2219 "device is not on same HW, can't offload");
5c65c564
OG
2220 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2221 peer_dev->name);
2222 return -EINVAL;
2223 }
2224 continue;
2225 }
2226
e3a2b7ed
AV
2227 if (is_tcf_skbedit_mark(a)) {
2228 u32 mark = tcf_skbedit_mark(a);
2229
2230 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
e98bedf5
EB
2231 NL_SET_ERR_MSG_MOD(extack,
2232 "Bad flow mark - only 16 bit is supported");
e3a2b7ed
AV
2233 return -EINVAL;
2234 }
2235
3bc4b7bf 2236 attr->flow_tag = mark;
1cab1cd7 2237 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
e3a2b7ed
AV
2238 continue;
2239 }
2240
2241 return -EINVAL;
2242 }
2243
1cab1cd7 2244 attr->action = action;
e98bedf5 2245 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
bdd66ac0
OG
2246 return -EOPNOTSUPP;
2247
e3a2b7ed
AV
2248 return 0;
2249}
2250
76f7444d
OG
2251static inline int cmp_encap_info(struct ip_tunnel_key *a,
2252 struct ip_tunnel_key *b)
a54e20b4
HHZ
2253{
2254 return memcmp(a, b, sizeof(*a));
2255}
2256
76f7444d 2257static inline int hash_encap_info(struct ip_tunnel_key *key)
a54e20b4 2258{
76f7444d 2259 return jhash(key, sizeof(*key), 0);
a54e20b4
HHZ
2260}
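
/* Editor's sketch of the hash-then-compare dedup the encap table uses:
 * hash the whole key struct for bucket selection, then confirm the hit
 * with memcmp (which relies on the key structs being zero-initialized so
 * padding compares equal). Toy FNV-1a stand-in for jhash(); names here
 * are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t example_hash_bytes(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}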
2261
a54e20b4 2262
b1d90e6b
RL
2263static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2264 struct net_device *peer_netdev)
2265{
2266 struct mlx5e_priv *peer_priv;
2267
2268 peer_priv = netdev_priv(peer_netdev);
2269
2270 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2271 (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
2272 same_hw_devs(priv, peer_priv) &&
2273 MLX5_VPORT_MANAGER(peer_priv->mdev) &&
2274 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2275}
2276
32f3671f 2277
f5bc2c5d 2278
a54e20b4
HHZ
2279static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2280 struct ip_tunnel_info *tun_info,
2281 struct net_device *mirred_dev,
45247bf2 2282 struct net_device **encap_dev,
e98bedf5
EB
2283 struct mlx5e_tc_flow *flow,
2284 struct netlink_ext_ack *extack)
a54e20b4
HHZ
2285{
2286 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2287 unsigned short family = ip_tunnel_info_af(tun_info);
45247bf2 2288 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
a54e20b4 2289 struct ip_tunnel_key *key = &tun_info->key;
c1ae1152 2290 struct mlx5e_encap_entry *e;
a54e20b4
HHZ
2291 uintptr_t hash_key;
2292 bool found = false;
54c177ca 2293 int err = 0;
a54e20b4 2294
76f7444d 2295 hash_key = hash_encap_info(key);
a54e20b4
HHZ
2296
2297 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2298 encap_hlist, hash_key) {
76f7444d 2299 if (!cmp_encap_info(&e->tun_info.key, key)) {
a54e20b4
HHZ
2300 found = true;
2301 break;
2302 }
2303 }
2304
b2812089 2305 /* must verify whether the encap entry is valid before use */
45247bf2
OG
2306 if (found)
2307 goto attach_flow;
a54e20b4
HHZ
2308
2309 e = kzalloc(sizeof(*e), GFP_KERNEL);
2310 if (!e)
2311 return -ENOMEM;
2312
76f7444d 2313 e->tun_info = *tun_info;
101f4de9 2314 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
54c177ca
OS
2315 if (err)
2316 goto out_err;
2317
a54e20b4
HHZ
2318 INIT_LIST_HEAD(&e->flows);
2319
ce99f6b9 2320 if (family == AF_INET)
101f4de9 2321 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
ce99f6b9 2322 else if (family == AF_INET6)
101f4de9 2323 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
ce99f6b9 2324
232c0013 2325 if (err && err != -EAGAIN)
a54e20b4
HHZ
2326 goto out_err;
2327
a54e20b4
HHZ
2328 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2329
45247bf2
OG
2330attach_flow:
2331 list_add(&flow->encap, &e->flows);
2332 *encap_dev = e->out_dev;
232c0013
HHZ
2333 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
2334 attr->encap_id = e->encap_id;
b2812089
VB
2335 else
2336 err = -EAGAIN;
45247bf2 2337
232c0013 2338 return err;
a54e20b4
HHZ
2339
2340out_err:
2341 kfree(e);
2342 return err;
2343}
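
/* Editor's sketch of mlx5e_attach_encap()'s find-or-create flow: look the
 * tunnel key up; on a hit, just link the flow onto the shared entry,
 * otherwise allocate and insert a new one before linking. Minimal
 * list-based analogue, all names hypothetical.
 */
#include <stdlib.h>

struct example_encap {
	struct example_encap *next;
	int key;
	int nflows;	/* flows sharing this encap header */
};

static struct example_encap *
example_attach_encap(struct example_encap **head, int key)
{
	struct example_encap *e;

	for (e = *head; e; e = e->next)
		if (e->key == key)
			goto attach;

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	e->key = key;
	e->next = *head;
	*head = e;
attach:
	e->nflows++;
	return e;
}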
2344
1482bd3d
JL
2345static int parse_tc_vlan_action(struct mlx5e_priv *priv,
2346 const struct tc_action *a,
2347 struct mlx5_esw_flow_attr *attr,
2348 u32 *action)
2349{
cc495188
JL
2350 u8 vlan_idx = attr->total_vlan;
2351
2352 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
2353 return -EOPNOTSUPP;
2354
1482bd3d 2355 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
cc495188
JL
2356 if (vlan_idx) {
2357 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2358 MLX5_FS_VLAN_DEPTH))
2359 return -EOPNOTSUPP;
2360
2361 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
2362 } else {
2363 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2364 }
1482bd3d 2365 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
cc495188
JL
2366 attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
2367 attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
2368 attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
2369 if (!attr->vlan_proto[vlan_idx])
2370 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
2371
2372 if (vlan_idx) {
2373 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2374 MLX5_FS_VLAN_DEPTH))
2375 return -EOPNOTSUPP;
2376
2377 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
2378 } else {
2379 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
2380 (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2381 tcf_vlan_push_prio(a)))
2382 return -EOPNOTSUPP;
2383
2384 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1482bd3d
JL
2385 }
2386 } else { /* action is TCA_VLAN_ACT_MODIFY */
2387 return -EOPNOTSUPP;
2388 }
2389
cc495188
JL
2390 attr->total_vlan = vlan_idx + 1;
2391
1482bd3d
JL
2392 return 0;
2393}
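
/* Editor's sketch of the index-to-action mapping above: at most two VLAN
 * headers (MLX5_FS_VLAN_DEPTH) may be pushed or popped per flow, the
 * second one selecting the *_2 variant of the HW action. Hypothetical
 * constants standing in for the MLX5_FLOW_CONTEXT_ACTION_VLAN_* flags.
 */
enum example_vlan_hw_act {
	EX_VLAN_POP, EX_VLAN_POP_2,
	EX_VLAN_PUSH, EX_VLAN_PUSH_2,
	EX_VLAN_UNSUPPORTED,
};

static enum example_vlan_hw_act example_vlan_act(int vlan_idx, int push)
{
	if (vlan_idx >= 2)
		return EX_VLAN_UNSUPPORTED;
	if (push)
		return vlan_idx ? EX_VLAN_PUSH_2 : EX_VLAN_PUSH;
	return vlan_idx ? EX_VLAN_POP_2 : EX_VLAN_POP;
}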
2394
03a9d11e 2395static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
d7e75a32 2396 struct mlx5e_tc_flow_parse_attr *parse_attr,
e98bedf5
EB
2397 struct mlx5e_tc_flow *flow,
2398 struct netlink_ext_ack *extack)
03a9d11e 2399{
bf07aa73 2400 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
ecf5bb79 2401 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1d447a39 2402 struct mlx5e_rep_priv *rpriv = priv->ppriv;
a54e20b4 2403 struct ip_tunnel_info *info = NULL;
03a9d11e 2404 const struct tc_action *a;
22dc13c8 2405 LIST_HEAD(actions);
a54e20b4 2406 bool encap = false;
1cab1cd7 2407 u32 action = 0;
244cd96a 2408 int err, i;
03a9d11e 2409
3bcc0cec 2410 if (!tcf_exts_has_actions(exts))
03a9d11e
OG
2411 return -EINVAL;
2412
1d447a39 2413 attr->in_rep = rpriv->rep;
10ff5359 2414 attr->in_mdev = priv->mdev;
03a9d11e 2415
244cd96a 2416 tcf_exts_for_each_action(i, a, exts) {
03a9d11e 2417 if (is_tcf_gact_shot(a)) {
1cab1cd7
OG
2418 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2419 MLX5_FLOW_CONTEXT_ACTION_COUNT;
03a9d11e
OG
2420 continue;
2421 }
2422
d7e75a32
OG
2423 if (is_tcf_pedit(a)) {
2424 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
e98bedf5 2425 parse_attr, extack);
d7e75a32
OG
2426 if (err)
2427 return err;
2428
1cab1cd7 2429 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
e85e02ba 2430 attr->split_count = attr->out_count;
d7e75a32
OG
2431 continue;
2432 }
2433
26c02749 2434 if (is_tcf_csum(a)) {
1cab1cd7 2435 if (csum_offload_supported(priv, action,
e98bedf5
EB
2436 tcf_csum_update_flags(a),
2437 extack))
26c02749
OG
2438 continue;
2439
2440 return -EOPNOTSUPP;
2441 }
2442
592d3651 2443 if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
03a9d11e 2444 struct mlx5e_priv *out_priv;
592d3651 2445 struct net_device *out_dev;
03a9d11e 2446
9f8a739e 2447 out_dev = tcf_mirred_dev(a);
ef381359
OS
2448 if (!out_dev) {
 2449 /* out_dev is NULL when filters with a
 2450 * non-existing mirred device are replayed to
2451 * the driver.
2452 */
2453 return -EINVAL;
2454 }
03a9d11e 2455
592d3651 2456 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
e98bedf5
EB
2457 NL_SET_ERR_MSG_MOD(extack,
2458 "can't support more output ports, can't offload forwarding");
592d3651
CM
2459 pr_err("can't support more than %d output ports, can't offload forwarding\n",
2460 attr->out_count);
2461 return -EOPNOTSUPP;
2462 }
2463
a54e20b4 2464 if (switchdev_port_same_parent_id(priv->netdev,
b1d90e6b
RL
2465 out_dev) ||
2466 is_merged_eswitch_dev(priv, out_dev)) {
1cab1cd7
OG
2467 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2468 MLX5_FLOW_CONTEXT_ACTION_COUNT;
a54e20b4 2469 out_priv = netdev_priv(out_dev);
1d447a39 2470 rpriv = out_priv->ppriv;
592d3651
CM
2471 attr->out_rep[attr->out_count] = rpriv->rep;
2472 attr->out_mdev[attr->out_count++] = out_priv->mdev;
a54e20b4 2473 } else if (encap) {
9f8a739e 2474 parse_attr->mirred_ifindex = out_dev->ifindex;
3c37745e
OG
2475 parse_attr->tun_info = *info;
2476 attr->parse_attr = parse_attr;
60786f09 2477 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1cab1cd7
OG
2478 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2479 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3c37745e 2480 /* attr->out_rep is resolved when we handle encap */
ef381359
OS
2481 } else if (parse_attr->filter_dev != priv->netdev) {
2482 /* All mlx5 devices are called to configure
2483 * high level device filters. Therefore, the
 2484 * *attempt* to install a filter on an invalid
 2485 * eswitch should not trigger an explicit error
2486 */
2487 return -EINVAL;
a54e20b4 2488 } else {
e98bedf5
EB
2489 NL_SET_ERR_MSG_MOD(extack,
2490 "devices are not on same switch HW, can't offload forwarding");
03a9d11e
OG
2491 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2492 priv->netdev->name, out_dev->name);
2493 return -EINVAL;
2494 }
a54e20b4
HHZ
2495 continue;
2496 }
03a9d11e 2497
a54e20b4
HHZ
2498 if (is_tcf_tunnel_set(a)) {
2499 info = tcf_tunnel_info(a);
2500 if (info)
2501 encap = true;
2502 else
2503 return -EOPNOTSUPP;
e85e02ba 2504 attr->split_count = attr->out_count;
03a9d11e
OG
2505 continue;
2506 }
2507
8b32580d 2508 if (is_tcf_vlan(a)) {
1482bd3d
JL
2509 err = parse_tc_vlan_action(priv, a, attr, &action);
2510
2511 if (err)
2512 return err;
2513
e85e02ba 2514 attr->split_count = attr->out_count;
8b32580d
OG
2515 continue;
2516 }
2517
bbd00f7e 2518 if (is_tcf_tunnel_release(a)) {
1cab1cd7 2519 action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
bbd00f7e
HHZ
2520 continue;
2521 }
2522
bf07aa73
PB
2523 if (is_tcf_gact_goto_chain(a)) {
2524 u32 dest_chain = tcf_gact_goto_chain_index(a);
2525 u32 max_chain = mlx5_eswitch_get_chain_range(esw);
2526
2527 if (dest_chain <= attr->chain) {
2528 NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
2529 return -EOPNOTSUPP;
2530 }
2531 if (dest_chain > max_chain) {
2532 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
2533 return -EOPNOTSUPP;
2534 }
2535 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2536 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2537 attr->dest_chain = dest_chain;
2538
2539 continue;
2540 }
2541
03a9d11e
OG
2542 return -EINVAL;
2543 }
bdd66ac0 2544
1cab1cd7 2545 attr->action = action;
e98bedf5 2546 if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
bdd66ac0
OG
2547 return -EOPNOTSUPP;
2548
e85e02ba 2549 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
e98bedf5
EB
2550 NL_SET_ERR_MSG_MOD(extack,
2551 "current firmware doesn't support split rule for port mirroring");
592d3651
CM
2552 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
2553 return -EOPNOTSUPP;
2554 }
2555
31c8eba5 2556 return 0;
03a9d11e
OG
2557}
2558
5dbe906f 2559static void get_flags(int flags, u16 *flow_flags)
60bd4af8 2560{
5dbe906f 2561 u16 __flow_flags = 0;
60bd4af8
OG
2562
2563 if (flags & MLX5E_TC_INGRESS)
2564 __flow_flags |= MLX5E_TC_FLOW_INGRESS;
2565 if (flags & MLX5E_TC_EGRESS)
2566 __flow_flags |= MLX5E_TC_FLOW_EGRESS;
2567
2568 *flow_flags = __flow_flags;
2569}
2570
05866c82
OG
2571static const struct rhashtable_params tc_ht_params = {
2572 .head_offset = offsetof(struct mlx5e_tc_flow, node),
2573 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2574 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2575 .automatic_shrinking = true,
2576};
2577
2578static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
2579{
655dc3d2
OG
2580 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2581 struct mlx5e_rep_priv *uplink_rpriv;
2582
2583 if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
2584 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
ec1366c2 2585 return &uplink_rpriv->uplink_priv.tc_ht;
655dc3d2
OG
2586 } else
2587 return &priv->fs.tc.ht;
05866c82
OG
2588}
2589
a88780a9
RD
2590static int
2591mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
5dbe906f 2592 struct tc_cls_flower_offload *f, u16 flow_flags,
a88780a9
RD
2593 struct mlx5e_tc_flow_parse_attr **__parse_attr,
2594 struct mlx5e_tc_flow **__flow)
e3a2b7ed 2595{
17091853 2596 struct mlx5e_tc_flow_parse_attr *parse_attr;
3bc4b7bf 2597 struct mlx5e_tc_flow *flow;
a88780a9 2598 int err;
e3a2b7ed 2599
65ba8fb7 2600 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
1b9a07ee 2601 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
17091853 2602 if (!parse_attr || !flow) {
e3a2b7ed
AV
2603 err = -ENOMEM;
2604 goto err_free;
2605 }
2606
2607 flow->cookie = f->cookie;
65ba8fb7 2608 flow->flags = flow_flags;
655dc3d2 2609 flow->priv = priv;
e3a2b7ed 2610
a88780a9
RD
2611 *__flow = flow;
2612 *__parse_attr = parse_attr;
2613
2614 return 0;
2615
2616err_free:
2617 kfree(flow);
2618 kvfree(parse_attr);
2619 return err;
2620}
2621
2622static int
2623mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2624 struct tc_cls_flower_offload *f,
5dbe906f 2625 u16 flow_flags,
d11afc26 2626 struct net_device *filter_dev,
a88780a9
RD
2627 struct mlx5e_tc_flow **__flow)
2628{
2629 struct netlink_ext_ack *extack = f->common.extack;
2630 struct mlx5e_tc_flow_parse_attr *parse_attr;
2631 struct mlx5e_tc_flow *flow;
2632 int attr_size, err;
e3a2b7ed 2633
a88780a9
RD
2634 flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2635 attr_size = sizeof(struct mlx5_esw_flow_attr);
2636 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2637 &parse_attr, &flow);
2638 if (err)
2639 goto out;
d11afc26
OS
2640 parse_attr->filter_dev = filter_dev;
2641 flow->esw_attr->parse_attr = parse_attr;
54c177ca
OS
2642 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
2643 f, filter_dev);
d11afc26
OS
2644 if (err)
2645 goto err_free;
a88780a9 2646
bf07aa73
PB
2647 flow->esw_attr->chain = f->common.chain_index;
2648 flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
a88780a9
RD
2649 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
2650 if (err)
2651 goto err_free;
2652
2653 err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
5dbe906f 2654 if (err)
c83954ab 2655 goto err_free;
e3a2b7ed 2656
a88780a9 2657 if (!(flow->esw_attr->action &
60786f09 2658 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
af1607c3
JL
2659 kvfree(parse_attr);
2660
a88780a9 2661 *__flow = flow;
5c40348c 2662
a88780a9
RD
2663 return 0;
2664
2665err_free:
2666 kfree(flow);
2667 kvfree(parse_attr);
2668out:
232c0013 2669 return err;
a88780a9
RD
2670}
2671
2672static int
2673mlx5e_add_nic_flow(struct mlx5e_priv *priv,
2674 struct tc_cls_flower_offload *f,
5dbe906f 2675 u16 flow_flags,
d11afc26 2676 struct net_device *filter_dev,
a88780a9
RD
2677 struct mlx5e_tc_flow **__flow)
2678{
2679 struct netlink_ext_ack *extack = f->common.extack;
2680 struct mlx5e_tc_flow_parse_attr *parse_attr;
2681 struct mlx5e_tc_flow *flow;
2682 int attr_size, err;
2683
bf07aa73
PB
2684 /* multi-chain not supported for NIC rules */
2685 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
2686 return -EOPNOTSUPP;
2687
a88780a9
RD
2688 flow_flags |= MLX5E_TC_FLOW_NIC;
2689 attr_size = sizeof(struct mlx5_nic_flow_attr);
2690 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2691 &parse_attr, &flow);
2692 if (err)
2693 goto out;
2694
d11afc26 2695 parse_attr->filter_dev = filter_dev;
54c177ca
OS
2696 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
2697 f, filter_dev);
d11afc26
OS
2698 if (err)
2699 goto err_free;
2700
a88780a9
RD
2701 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
2702 if (err)
2703 goto err_free;
2704
2705 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
2706 if (err)
2707 goto err_free;
2708
2709 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2710 kvfree(parse_attr);
2711 *__flow = flow;
2712
2713 return 0;
e3a2b7ed 2714
e3a2b7ed 2715err_free:
a88780a9 2716 kfree(flow);
17091853 2717 kvfree(parse_attr);
a88780a9
RD
2718out:
2719 return err;
2720}
2721
2722static int
2723mlx5e_tc_add_flow(struct mlx5e_priv *priv,
2724 struct tc_cls_flower_offload *f,
2725 int flags,
d11afc26 2726 struct net_device *filter_dev,
a88780a9
RD
2727 struct mlx5e_tc_flow **flow)
2728{
2729 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5dbe906f 2730 u16 flow_flags;
a88780a9
RD
2731 int err;
2732
2733 get_flags(flags, &flow_flags);
2734
bf07aa73
PB
2735 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
2736 return -EOPNOTSUPP;
2737
a88780a9 2738 if (esw && esw->mode == SRIOV_OFFLOADS)
d11afc26
OS
2739 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
2740 filter_dev, flow);
a88780a9 2741 else
d11afc26
OS
2742 err = mlx5e_add_nic_flow(priv, f, flow_flags,
2743 filter_dev, flow);
a88780a9
RD
2744
2745 return err;
2746}
2747
71d82d2a 2748int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
a88780a9
RD
2749 struct tc_cls_flower_offload *f, int flags)
2750{
2751 struct netlink_ext_ack *extack = f->common.extack;
2752 struct rhashtable *tc_ht = get_tc_ht(priv);
2753 struct mlx5e_tc_flow *flow;
2754 int err = 0;
2755
2756 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2757 if (flow) {
2758 NL_SET_ERR_MSG_MOD(extack,
2759 "flow cookie already exists, ignoring");
2760 netdev_warn_once(priv->netdev,
2761 "flow cookie %lx already exists, ignoring\n",
2762 f->cookie);
2763 goto out;
2764 }
2765
d11afc26 2766 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
a88780a9
RD
2767 if (err)
2768 goto out;
2769
2770 err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
2771 if (err)
2772 goto err_free;
2773
2774 return 0;
2775
2776err_free:
2777 mlx5e_tc_del_flow(priv, flow);
232c0013 2778 kfree(flow);
a88780a9 2779out:
e3a2b7ed
AV
2780 return err;
2781}
2782
8f8ae895
OG
2783#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
2784#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
2785
2786static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
2787{
2788 if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
2789 return true;
2790
2791 return false;
2792}
2793
71d82d2a 2794int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
60bd4af8 2795 struct tc_cls_flower_offload *f, int flags)
e3a2b7ed 2796{
05866c82 2797 struct rhashtable *tc_ht = get_tc_ht(priv);
e3a2b7ed 2798 struct mlx5e_tc_flow *flow;
e3a2b7ed 2799
05866c82 2800 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 2801 if (!flow || !same_flow_direction(flow, flags))
e3a2b7ed
AV
2802 return -EINVAL;
2803
05866c82 2804 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
e3a2b7ed 2805
961e8979 2806 mlx5e_tc_del_flow(priv, flow);
e3a2b7ed
AV
2807
2808 kfree(flow);
2809
2810 return 0;
2811}
2812
71d82d2a 2813int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
60bd4af8 2814 struct tc_cls_flower_offload *f, int flags)
aad7e08d 2815{
05866c82 2816 struct rhashtable *tc_ht = get_tc_ht(priv);
aad7e08d 2817 struct mlx5e_tc_flow *flow;
aad7e08d
AV
2818 struct mlx5_fc *counter;
2819 u64 bytes;
2820 u64 packets;
2821 u64 lastuse;
2822
05866c82 2823 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
8f8ae895 2824 if (!flow || !same_flow_direction(flow, flags))
aad7e08d
AV
2825 return -EINVAL;
2826
0b67a38f
HHZ
2827 if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
2828 return 0;
2829
b8aee822 2830 counter = mlx5e_tc_get_counter(flow);
aad7e08d
AV
2831 if (!counter)
2832 return 0;
2833
2834 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
2835
d897a638 2836 tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
fed06ee8 2837
aad7e08d
AV
2838 return 0;
2839}
2840
4d8fcf21
AH
2841static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
2842 struct mlx5e_priv *peer_priv)
2843{
2844 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
2845 struct mlx5e_hairpin_entry *hpe;
2846 u16 peer_vhca_id;
2847 int bkt;
2848
2849 if (!same_hw_devs(priv, peer_priv))
2850 return;
2851
2852 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
2853
2854 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
2855 if (hpe->peer_vhca_id == peer_vhca_id)
2856 hpe->hp->pair->peer_gone = true;
2857 }
2858}
2859
2860static int mlx5e_tc_netdev_event(struct notifier_block *this,
2861 unsigned long event, void *ptr)
2862{
2863 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2864 struct mlx5e_flow_steering *fs;
2865 struct mlx5e_priv *peer_priv;
2866 struct mlx5e_tc_table *tc;
2867 struct mlx5e_priv *priv;
2868
2869 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
2870 event != NETDEV_UNREGISTER ||
2871 ndev->reg_state == NETREG_REGISTERED)
2872 return NOTIFY_DONE;
2873
2874 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
2875 fs = container_of(tc, struct mlx5e_flow_steering, tc);
2876 priv = container_of(fs, struct mlx5e_priv, fs);
2877 peer_priv = netdev_priv(ndev);
2878 if (priv == peer_priv ||
2879 !(priv->netdev->features & NETIF_F_HW_TC))
2880 return NOTIFY_DONE;
2881
2882 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
2883
2884 return NOTIFY_DONE;
2885}
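
/* Editor's sketch: the notifier above recovers its mlx5e_priv by walking
 * three nested container_of() steps (tc -> fs -> priv). Userspace
 * analogue of the same pointer arithmetic; struct names are hypothetical.
 */
#include <stddef.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_tc { int nb; };
struct example_fs { struct example_tc tc; };
struct example_priv { struct example_fs fs; };

static struct example_priv *example_priv_from_tc(struct example_tc *tc)
{
	struct example_fs *fs =
		example_container_of(tc, struct example_fs, tc);

	return example_container_of(fs, struct example_priv, fs);
}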
2886
655dc3d2 2887int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
e8f887ac 2888{
acff797c 2889 struct mlx5e_tc_table *tc = &priv->fs.tc;
4d8fcf21 2890 int err;
e8f887ac 2891
11c9c548 2892 hash_init(tc->mod_hdr_tbl);
5c65c564 2893 hash_init(tc->hairpin_tbl);
11c9c548 2894
4d8fcf21
AH
2895 err = rhashtable_init(&tc->ht, &tc_ht_params);
2896 if (err)
2897 return err;
2898
2899 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
2900 if (register_netdevice_notifier(&tc->netdevice_nb)) {
2901 tc->netdevice_nb.notifier_call = NULL;
2902 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
2903 }
2904
2905 return err;
e8f887ac
AV
2906}
2907
2908static void _mlx5e_tc_del_flow(void *ptr, void *arg)
2909{
2910 struct mlx5e_tc_flow *flow = ptr;
655dc3d2 2911 struct mlx5e_priv *priv = flow->priv;
e8f887ac 2912
961e8979 2913 mlx5e_tc_del_flow(priv, flow);
e8f887ac
AV
2914 kfree(flow);
2915}
2916
655dc3d2 2917void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
e8f887ac 2918{
acff797c 2919 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac 2920
4d8fcf21
AH
2921 if (tc->netdevice_nb.notifier_call)
2922 unregister_netdevice_notifier(&tc->netdevice_nb);
2923
655dc3d2 2924 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
e8f887ac 2925
acff797c
MG
2926 if (!IS_ERR_OR_NULL(tc->t)) {
2927 mlx5_destroy_flow_table(tc->t);
2928 tc->t = NULL;
e8f887ac
AV
2929 }
2930}
655dc3d2
OG
2931
2932int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
2933{
2934 return rhashtable_init(tc_ht, &tc_ht_params);
2935}
2936
2937void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
2938{
2939 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
2940}
01252a27
OG
2941
2942int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
2943{
2944 struct rhashtable *tc_ht = get_tc_ht(priv);
2945
2946 return atomic_read(&tc_ht->nelems);
2947}