net/mlx5e: Infrastructure for duplicated offloading of TC flows
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"

struct mlx5_nic_flow_attr {
        u32 action;
        u32 flow_tag;
        u32 mod_hdr_id;
        u32 hairpin_tirn;
        u8 match_level;
        struct mlx5_flow_table  *hairpin_ft;
        struct mlx5_fc          *counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
        MLX5E_TC_FLOW_INGRESS   = MLX5E_TC_INGRESS,
        MLX5E_TC_FLOW_EGRESS    = MLX5E_TC_EGRESS,
        MLX5E_TC_FLOW_ESWITCH   = BIT(MLX5E_TC_FLOW_BASE),
        MLX5E_TC_FLOW_NIC       = BIT(MLX5E_TC_FLOW_BASE + 1),
        MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
        MLX5E_TC_FLOW_HAIRPIN   = BIT(MLX5E_TC_FLOW_BASE + 3),
        MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
        MLX5E_TC_FLOW_SLOW        = BIT(MLX5E_TC_FLOW_BASE + 5),
        MLX5E_TC_FLOW_DUP         = BIT(MLX5E_TC_FLOW_BASE + 6),
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *        container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *        container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
        struct list_head list;
        int index;
};
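
/* Illustrative sketch (not part of the driver logic): recovering the owning
 * flow from an encap_flow_item follows the two steps above.  Given an encap
 * entry @e whose ->flows list links encap_flow_items:
 *
 *   struct encap_flow_item *efi;
 *   struct mlx5e_tc_flow *flow;
 *
 *   list_for_each_entry(efi, &e->flows, list)
 *       flow = container_of(efi, struct mlx5e_tc_flow,
 *                           encaps[efi->index]);
 *
 * list_for_each_entry() performs step 1 internally; the container_of() call
 * is step 2, exactly as done in mlx5e_tc_encap_flows_add() below.
 */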

struct mlx5e_tc_flow {
        struct rhash_head       node;
        struct mlx5e_priv       *priv;
        u64                     cookie;
        u16                     flags;
        struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
        /* Flow can be associated with multiple encap IDs.
         * The number of encaps is bounded by the number of supported
         * destinations.
         */
        struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5e_tc_flow    *peer_flow;
        struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
        struct list_head        hairpin; /* flows sharing the same hairpin */
        struct list_head        peer;    /* flows with peer flow */
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];
        };
};

struct mlx5e_tc_flow_parse_attr {
        struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
        struct net_device *filter_dev;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        void *mod_hdr_actions;
        int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
        struct mlx5_hairpin *pair;

        struct mlx5_core_dev *func_mdev;
        struct mlx5e_priv *func_priv;
        u32 tdn;
        u32 tirn;

        int num_channels;
        struct mlx5e_rqt indir_rqt;
        u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
        /* a node of a hash table which keeps all the hairpin entries */
        struct hlist_node hairpin_hlist;

        /* flows sharing the same hairpin */
        struct list_head flows;

        u16 peer_vhca_id;
        u8 prio;
        struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
        int num_actions;
        void *actions;
};

struct mlx5e_mod_hdr_entry {
        /* a node of a hash table which keeps all the mod_hdr entries */
        struct hlist_node mod_hdr_hlist;

        /* flows sharing the same mod_hdr entry */
        struct list_head flows;

        struct mod_hdr_key key;

        u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
        return jhash(key->actions,
                     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
                                   struct mod_hdr_key *b)
{
        if (a->num_actions != b->num_actions)
                return 1;

        return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int num_actions, actions_size, namespace, err;
        struct mlx5e_mod_hdr_entry *mh;
        struct mod_hdr_key key;
        bool found = false;
        u32 hash_key;

        num_actions  = parse_attr->num_mod_hdr_actions;
        actions_size = MLX5_MH_ACT_SZ * num_actions;

        key.actions = parse_attr->mod_hdr_actions;
        key.num_actions = num_actions;

        hash_key = hash_mod_hdr_info(&key);

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                namespace = MLX5_FLOW_NAMESPACE_FDB;
                hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        } else {
                namespace = MLX5_FLOW_NAMESPACE_KERNEL;
                hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        }

        if (found)
                goto attach_flow;

        mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
        if (!mh)
                return -ENOMEM;

        mh->key.actions = (void *)mh + sizeof(*mh);
        memcpy(mh->key.actions, key.actions, actions_size);
        mh->key.num_actions = num_actions;
        INIT_LIST_HEAD(&mh->flows);

        err = mlx5_modify_header_alloc(priv->mdev, namespace,
                                       mh->key.num_actions,
                                       mh->key.actions,
                                       &mh->mod_hdr_id);
        if (err)
                goto out_err;

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
        else
                hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
        list_add(&flow->mod_hdr, &mh->flows);
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
        else
                flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

        return 0;

out_err:
        kfree(mh);
        return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
                                 struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->mod_hdr.next;

        list_del(&flow->mod_hdr);

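        /* After the list_del() above, @next points either at another flow's
         * mod_hdr list_head or back at mh->flows itself; list_empty(@next) is
         * true only in the latter case, when no flows reference the entry any
         * more, so the mod_hdr entry is freed exactly when its flow list has
         * drained.
         */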
        if (list_empty(next)) {
                struct mlx5e_mod_hdr_entry *mh;

                mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

                mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
                hash_del(&mh->mod_hdr_hlist);
                kfree(mh);
        }
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;

        netdev = __dev_get_by_index(net, ifindex);
        priv = netdev_priv(netdev);
        return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
        u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
        void *tirc;
        int err;

        err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
        if (err)
                goto alloc_tdn_err;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
        MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
        MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

        err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
        if (err)
                goto create_tir_err;

        return 0;

create_tir_err:
        mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
        return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
        mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
        mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
        struct mlx5e_priv *priv = hp->func_priv;
        int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

        mlx5e_build_default_indir_rqt(indirection_rqt, sz,
                                      hp->num_channels);

        for (i = 0; i < sz; i++) {
                ix = i;
                if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, ilog2(sz));
                ix = indirection_rqt[ix];
                rqn = hp->pair->rqn[ix];
                MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
        int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
        struct mlx5e_priv *priv = hp->func_priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        u32 *in;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

        err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
        if (!err)
                hp->indir_rqt.enabled = true;

        kvfree(in);
        return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;
        u32 in[MLX5_ST_SZ_DW(create_tir_in)];
        int tt, i, err;
        void *tirc;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

                memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

                MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
                MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
                MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
                mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

                err = mlx5_core_create_tir(hp->func_mdev, in,
                                           MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
                if (err) {
                        mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
                        goto err_destroy_tirs;
                }
        }
        return 0;

err_destroy_tirs:
        for (i = 0; i < tt; i++)
                mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
        return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
        int tt;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
                                         struct ttc_params *ttc_params)
{
        struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
        int tt;

        memset(ttc_params, 0, sizeof(*ttc_params));

        ttc_params->any_tt_tirn = hp->tirn;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

        ft_attr->max_fte = MLX5E_NUM_TT;
        ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
        ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;
        struct ttc_params ttc_params;
        int err;

        err = mlx5e_hairpin_create_indirect_rqt(hp);
        if (err)
                return err;

        err = mlx5e_hairpin_create_indirect_tirs(hp);
        if (err)
                goto err_create_indirect_tirs;

        mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
        err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
        if (err)
                goto err_create_ttc_table;

        netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
                   hp->num_channels, hp->ttc.ft.t->id);

        return 0;

err_create_ttc_table:
        mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
        mlx5e_destroy_rqt(priv, &hp->indir_rqt);

        return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;

        mlx5e_destroy_ttc_table(priv, &hp->ttc);
        mlx5e_hairpin_destroy_indirect_tirs(hp);
        mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
                     int peer_ifindex)
{
        struct mlx5_core_dev *func_mdev, *peer_mdev;
        struct mlx5e_hairpin *hp;
        struct mlx5_hairpin *pair;
        int err;

        hp = kzalloc(sizeof(*hp), GFP_KERNEL);
        if (!hp)
                return ERR_PTR(-ENOMEM);

        func_mdev = priv->mdev;
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

        pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
        if (IS_ERR(pair)) {
                err = PTR_ERR(pair);
                goto create_pair_err;
        }
        hp->pair = pair;
        hp->func_mdev = func_mdev;
        hp->func_priv = priv;
        hp->num_channels = params->num_channels;

        err = mlx5e_hairpin_create_transport(hp);
        if (err)
                goto create_transport_err;

        if (hp->num_channels > 1) {
                err = mlx5e_hairpin_rss_init(hp);
                if (err)
                        goto rss_init_err;
        }

        return hp;

rss_init_err:
        mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
        mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
        kfree(hp);
        return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
        if (hp->num_channels > 1)
                mlx5e_hairpin_rss_cleanup(hp);
        mlx5e_hairpin_destroy_transport(hp);
        mlx5_core_hairpin_destroy(hp->pair);
        kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
        return (peer_vhca_id << 16 | prio);
}
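
/* For example: peer_vhca_id 5 and prio 3 hash to (5 << 16) | 3 == 0x50003.
 * Since prio is a u8, the 32-bit key is unique per (vhca id, prio) pair.
 */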

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
                                                     u16 peer_vhca_id, u8 prio)
{
        struct mlx5e_hairpin_entry *hpe;
        u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

        hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
                               hairpin_hlist, hash_key) {
                if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
                        return hpe;
        }

        return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
                                  struct mlx5_flow_spec *spec, u8 *match_prio,
                                  struct netlink_ext_ack *extack)
{
        void *headers_c, *headers_v;
        u8 prio_val, prio_mask = 0;
        bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "only PCP trust state supported for hairpin");
                return -EOPNOTSUPP;
        }
#endif
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

        vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
        if (vlan_present) {
                prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
                prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
        }

        if (!vlan_present || !prio_mask) {
                prio_val = UNKNOWN_MATCH_PRIO;
        } else if (prio_mask != 0x7) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "masked priority match not supported for hairpin");
                return -EOPNOTSUPP;
        }

        *match_prio = prio_val;
        return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
                                  struct netlink_ext_ack *extack)
{
        int peer_ifindex = parse_attr->mirred_ifindex[0];
        struct mlx5_hairpin_params params;
        struct mlx5_core_dev *peer_mdev;
        struct mlx5e_hairpin_entry *hpe;
        struct mlx5e_hairpin *hp;
        u64 link_speed64;
        u32 link_speed;
        u8 match_prio;
        u16 peer_id;
        int err;

        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
                NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
        }

        peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
        err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
                                     extack);
        if (err)
                return err;
        hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
        if (hpe)
                goto attach_flow;

        hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
        if (!hpe)
                return -ENOMEM;

        INIT_LIST_HEAD(&hpe->flows);
        hpe->peer_vhca_id = peer_id;
        hpe->prio = match_prio;

        params.log_data_size = 15;
        params.log_data_size = min_t(u8, params.log_data_size,
                                     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
        params.log_data_size = max_t(u8, params.log_data_size,
                                     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

        params.log_num_packets = params.log_data_size -
                                 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
        params.log_num_packets = min_t(u8, params.log_num_packets,
                                       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

        params.q_counter = priv->q_counter;
        /* allocate one hairpin channel per 50Gbps share of the link speed */
        mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
        link_speed = max_t(u32, link_speed, 50000);
        link_speed64 = link_speed;
        do_div(link_speed64, 50000);
        params.num_channels = link_speed64;
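        /* For example: on a 100Gbps port, link_speed is 100000 and
         * 100000 / 50000 yields two hairpin channels; links at or below
         * 50Gbps are clamped to a single channel.
         */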

        hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
        if (IS_ERR(hp)) {
                err = PTR_ERR(hp);
                goto create_hairpin_err;
        }

        netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
                   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
                   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

        hpe->hp = hp;
        hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
                 hash_hairpin_info(peer_id, match_prio));

attach_flow:
        if (hpe->hp->num_channels > 1) {
                flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
                flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
        } else {
                flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
        }
        list_add(&flow->hairpin, &hpe->flows);

        return 0;

create_hairpin_err:
        kfree(hpe);
        return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
                                   struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->hairpin.next;

        list_del(&flow->hairpin);

        /* no more hairpin flows for us, release the hairpin pair */
        if (list_empty(next)) {
                struct mlx5e_hairpin_entry *hpe;

                hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

                netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
                           hpe->hp->pair->peer_mdev->priv.name);

                mlx5e_hairpin_destroy(hpe->hp);
                hash_del(&hpe->hairpin_hlist);
                kfree(hpe);
        }
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
                .flow_tag = attr->flow_tag,
                .reformat_id = 0,
                .flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
        };
        struct mlx5_fc *counter = NULL;
        bool table_created = false;
        int err, dest_ix = 0;

        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
                err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
                if (err)
                        goto err_add_hairpin_flow;
                if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
                        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[dest_ix].ft = attr->hairpin_ft;
                } else {
                        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                        dest[dest_ix].tir_num = attr->hairpin_tirn;
                }
                dest_ix++;
        } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest[dest_ix].ft = priv->fs.vlan.ft.t;
                dest_ix++;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter)) {
                        err = PTR_ERR(counter);
                        goto err_fc_create;
                }
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dest_ix].counter_id = mlx5_fc_id(counter);
                dest_ix++;
                attr->counter = counter;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
                if (err)
                        goto err_create_mod_hdr_id;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                int tc_grp_size, tc_tbl_size;
                u32 max_flow_counter;

                max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                                    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

                tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

                tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
                                    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
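                /* Worked example (illustrative capability values): with
                 * max_flow_counter = 0xFFFF and log_max_ft_size = 17,
                 * tc_grp_size = min(65535, 65536) = 65535 and
                 * tc_tbl_size = min(4 * 65535, 131072) = 131072 entries.
                 */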

                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            tc_tbl_size,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            MLX5E_TC_FT_LEVEL, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed to create tc offload table");
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        err = PTR_ERR(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        if (attr->match_level != MLX5_MATCH_NONE)
                parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

        flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
                                            &flow_act, dest, dest_ix);

        if (IS_ERR(flow->rule[0])) {
                err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
        }

        return 0;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
        mlx5_fc_destroy(dev, counter);
err_fc_create:
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
        return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;

        counter = attr->counter;
        mlx5_del_flow_rules(flow->rule[0]);
        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);

        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
                              struct mlx5e_tc_flow *flow,
                              struct netlink_ext_ack *extack,
                              int out_index);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
                           struct mlx5e_tc_flow *flow,
                           struct mlx5_flow_spec *spec,
                           struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_handle *rule;

        rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
        if (IS_ERR(rule))
                return rule;

        if (attr->split_count) {
                flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
                if (IS_ERR(flow->rule[1])) {
                        mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
                        return flow->rule[1];
                }
        }

        flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
                             struct mlx5e_tc_flow *flow,
                             struct mlx5_esw_flow_attr *attr)
{
        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

        if (attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

        mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_spec *spec,
                              struct mlx5_esw_flow_attr *slow_attr)
{
        struct mlx5_flow_handle *rule;

        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
        slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
                flow->flags |= MLX5E_TC_FLOW_SLOW;

        return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
                                  struct mlx5e_tc_flow *flow,
                                  struct mlx5_esw_flow_attr *slow_attr)
{
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
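
/* A flow parked in the slow path (MLX5E_TC_FLOW_SLOW) keeps only a plain
 * FWD_DEST action into FDB_SLOW_PATH_CHAIN, with no split count or encap;
 * mlx5e_tc_encap_flows_add() promotes it back to a fully offloaded rule
 * once the tunnel neighbour becomes valid.
 */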

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        u32 max_chain = mlx5_eswitch_get_chain_range(esw);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        u16 max_prio = mlx5_eswitch_get_prio_range(esw);
        struct net_device *out_dev, *encap_dev = NULL;
        struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
        int err = 0, encap_err = 0;
        int out_index;

        /* if prios are not supported, keep the old behaviour of using the
         * same prio for all offloaded rules.
         */
        if (!mlx5_eswitch_prios_supported(esw))
                attr->prio = 1;

        if (attr->chain > max_chain) {
                NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
                err = -EOPNOTSUPP;
                goto err_max_prio_chain;
        }

        if (attr->prio > max_prio) {
                NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
                err = -EOPNOTSUPP;
                goto err_max_prio_chain;
        }

        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
                int mirred_ifindex;

                if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
                        continue;

                mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             mirred_ifindex);
                err = mlx5e_attach_encap(priv,
                                         &parse_attr->tun_info[out_index],
                                         out_dev, &encap_dev, flow,
                                         extack, out_index);
                if (err && err != -EAGAIN)
                        goto err_attach_encap;
                if (err == -EAGAIN)
                        encap_err = err;
                out_priv = netdev_priv(encap_dev);
                rpriv = out_priv->ppriv;
                attr->dests[out_index].rep = rpriv->rep;
                attr->dests[out_index].mdev = out_priv->mdev;
        }

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                goto err_add_vlan;

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                kfree(parse_attr->mod_hdr_actions);
                if (err)
                        goto err_mod_hdr;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter)) {
                        err = PTR_ERR(counter);
                        goto err_create_counter;
                }

                attr->counter = counter;
        }

        /* we get here if either (1) there were no errors, or (2) there is an
         * encap action and we got -EAGAIN (no valid neighbour yet), in which
         * case the flow is offloaded to the slow path instead.
         */
        if (encap_err == -EAGAIN) {
                /* continue with goto slow path rule instead */
                struct mlx5_esw_flow_attr slow_attr;

                flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
        } else {
                flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
        }

        if (IS_ERR(flow->rule[0])) {
                err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
        }

        return 0;

err_add_rule:
        mlx5_fc_destroy(esw->dev, counter);
err_create_counter:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
                if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
                        mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
        return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5_esw_flow_attr slow_attr;
        int out_index;

        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                if (flow->flags & MLX5E_TC_FLOW_SLOW)
                        mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
                else
                        mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
        }

        mlx5_eswitch_del_vlan_action(esw, attr);

        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
                if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
                        mlx5e_detach_encap(priv, flow, out_index);
        kvfree(attr->parse_attr);

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                mlx5_fc_destroy(esw->dev, attr->counter);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr slow_attr, *esw_attr;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct encap_flow_item *efi;
        struct mlx5e_tc_flow *flow;
        int err;

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         e->encap_size, e->encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
                return;
        }
        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(efi, &e->flows, list) {
                bool all_flow_encaps_valid = true;
                int i;

                flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
                esw_attr = flow->esw_attr;
                spec = &esw_attr->parse_attr->spec;

                esw_attr->dests[efi->index].encap_id = e->encap_id;
                esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
                /* Flow can be associated with multiple encap entries.
                 * Before offloading the flow verify that all of them have
                 * a valid neighbour.
                 */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
                                continue;
                        if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
                                all_flow_encaps_valid = false;
                                break;
                        }
                }
                /* Do not offload flows with unresolved neighbors */
                if (!all_flow_encaps_valid)
                        continue;
                /* update from slow path rule to encap rule */
                rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }

                mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
                flow->rule[0] = rule;
        }
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr slow_attr;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct encap_flow_item *efi;
        struct mlx5e_tc_flow *flow;
        int err;

        list_for_each_entry(efi, &e->flows, list) {
                flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
                spec = &flow->esw_attr->parse_attr->spec;

                /* update from encap rule to slow path rule */
                rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
                /* mark the flow's encap dest as non-valid */
                flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
                                       err);
                        continue;
                }

                mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
                flow->rule[0] = rule;
        }

        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
                mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
        }
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                return flow->esw_attr->counter;
        else
                return flow->nic_attr->counter;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
        u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;

        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
        else if (m_neigh->family == AF_INET6)
                tbl = &nd_tbl;
#endif
        else
                return;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                struct encap_flow_item *efi;

                if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
                        continue;
                list_for_each_entry(efi, &e->flows, list) {
                        flow = container_of(efi, struct mlx5e_tc_flow,
                                            encaps[efi->index]);
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5e_tc_get_counter(flow);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
                                }
                        }
                }
                if (neigh_used)
                        break;
        }

        if (neigh_used) {
                nhe->reported_lastuse = jiffies;

                /* find the relevant neigh according to the cached device and
                 * dst ip pair
                 */
                n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
                if (!n)
                        return;

                neigh_event_send(n, NULL);
                neigh_release(n);
        }
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index)
{
        struct list_head *next = flow->encaps[out_index].list.next;

        list_del(&flow->encaps[out_index].list);
        if (list_empty(next)) {
                struct mlx5e_encap_entry *e;

                e = list_entry(next, struct mlx5e_encap_entry, flows);
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
}

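/* With the duplicated-offloading infrastructure, a flow marked
 * MLX5E_TC_FLOW_DUP has a mirror instance offloaded on the peer eswitch,
 * reachable through flow->peer_flow; deleting the original therefore also
 * unlinks and frees the peer copy here.
 */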
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

        if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
            !(flow->flags & MLX5E_TC_FLOW_DUP))
                return;

        mutex_lock(&esw->offloads.peer_mutex);
        list_del(&flow->peer);
        mutex_unlock(&esw->offloads.peer_mutex);

        flow->flags &= ~MLX5E_TC_FLOW_DUP;

        mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
        kvfree(flow->peer_flow);
        flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
        struct mlx5_core_dev *dev = flow->priv->mdev;
        struct mlx5_devcom *devcom = dev->priv.devcom;
        struct mlx5_eswitch *peer_esw;

        peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
        if (!peer_esw)
                return;

        __mlx5e_tc_del_fdb_peer_flow(flow);
        mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                mlx5e_tc_del_fdb_peer_flow(flow);
                mlx5e_tc_del_fdb_flow(priv, flow);
        } else {
                mlx5e_tc_del_nic_flow(priv, flow);
        }
}

1301 static int parse_tunnel_attr(struct mlx5e_priv *priv,
1302                              struct mlx5_flow_spec *spec,
1303                              struct tc_cls_flower_offload *f,
1304                              struct net_device *filter_dev)
1305 {
1306         struct netlink_ext_ack *extack = f->common.extack;
1307         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1308                                        outer_headers);
1309         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1310                                        outer_headers);
1311
1312         struct flow_dissector_key_control *enc_control =
1313                 skb_flow_dissector_target(f->dissector,
1314                                           FLOW_DISSECTOR_KEY_ENC_CONTROL,
1315                                           f->key);
1316         int err = 0;
1317
1318         err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1319                                  headers_c, headers_v);
1320         if (err) {
1321                 NL_SET_ERR_MSG_MOD(extack,
1322                                    "failed to parse tunnel attributes");
1323                 return err;
1324         }
1325
1326         if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1327                 struct flow_dissector_key_ipv4_addrs *key =
1328                         skb_flow_dissector_target(f->dissector,
1329                                                   FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1330                                                   f->key);
1331                 struct flow_dissector_key_ipv4_addrs *mask =
1332                         skb_flow_dissector_target(f->dissector,
1333                                                   FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1334                                                   f->mask);
1335                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1336                          src_ipv4_src_ipv6.ipv4_layout.ipv4,
1337                          ntohl(mask->src));
1338                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1339                          src_ipv4_src_ipv6.ipv4_layout.ipv4,
1340                          ntohl(key->src));
1341
1342                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1343                          dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1344                          ntohl(mask->dst));
1345                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1346                          dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1347                          ntohl(key->dst));
1348
1349                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1350                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
1351         } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1352                 struct flow_dissector_key_ipv6_addrs *key =
1353                         skb_flow_dissector_target(f->dissector,
1354                                                   FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1355                                                   f->key);
1356                 struct flow_dissector_key_ipv6_addrs *mask =
1357                         skb_flow_dissector_target(f->dissector,
1358                                                   FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1359                                                   f->mask);
1360
1361                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1362                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1363                        &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1364                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1365                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1366                        &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1367
1368                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1369                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1370                        &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1371                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1372                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1373                        &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1374
1375                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1376                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
1377         }
1378
1379         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
1380                 struct flow_dissector_key_ip *key =
1381                         skb_flow_dissector_target(f->dissector,
1382                                                   FLOW_DISSECTOR_KEY_ENC_IP,
1383                                                   f->key);
1384                 struct flow_dissector_key_ip *mask =
1385                         skb_flow_dissector_target(f->dissector,
1386                                                   FLOW_DISSECTOR_KEY_ENC_IP,
1387                                                   f->mask);
1388
1389                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1390                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1391
1392                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1393                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos  >> 2);
1394
1395                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1396                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1397
1398                 if (mask->ttl &&
1399                     !MLX5_CAP_ESW_FLOWTABLE_FDB(
1400                                 priv->mdev,
1401                                 ft_field_support.outer_ipv4_ttl)) {
1402                         NL_SET_ERR_MSG_MOD(extack,
1403                                            "Matching on TTL is not supported");
1404                         return -EOPNOTSUPP;
1405                 }
1406
1407         }
1408
1409         /* Enforce DMAC when offloading incoming tunneled flows.
1410          * Flow counters require a match on the DMAC.
1411          */
1412         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1413         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1414         ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1415                                      dmac_47_16), priv->netdev->dev_addr);
1416
1417         /* let software handle IP fragments */
1418         MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1419         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
1420
1421         return 0;
1422 }
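
/* Illustrative only: a decap rule of roughly this shape (hypothetical
 * VXLAN tunnel device and representor names) exercises the tunnel match
 * parsing above, including the enc_ keys and the DMAC enforcement:
 *
 *   tc filter add dev vxlan0 ingress protocol ip flower \
 *       enc_key_id 42 enc_dst_ip 192.0.2.1 enc_dst_port 4789 \
 *       action tunnel_key unset \
 *       action mirred egress redirect dev vf0_rep
 */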
1423
1424 static int __parse_cls_flower(struct mlx5e_priv *priv,
1425                               struct mlx5_flow_spec *spec,
1426                               struct tc_cls_flower_offload *f,
1427                               struct net_device *filter_dev,
1428                               u8 *match_level)
1429 {
1430         struct netlink_ext_ack *extack = f->common.extack;
1431         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1432                                        outer_headers);
1433         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1434                                        outer_headers);
1435         void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1436                                     misc_parameters);
1437         void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1438                                     misc_parameters);
1439         u16 addr_type = 0;
1440         u8 ip_proto = 0;
1441
1442         *match_level = MLX5_MATCH_NONE;
1443
1444         if (f->dissector->used_keys &
1445             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1446               BIT(FLOW_DISSECTOR_KEY_BASIC) |
1447               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1448               BIT(FLOW_DISSECTOR_KEY_VLAN) |
1449               BIT(FLOW_DISSECTOR_KEY_CVLAN) |
1450               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1451               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1452               BIT(FLOW_DISSECTOR_KEY_PORTS) |
1453               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1454               BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1455               BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1456               BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1457               BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1458               BIT(FLOW_DISSECTOR_KEY_TCP) |
1459               BIT(FLOW_DISSECTOR_KEY_IP)  |
1460               BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
1461                 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
1462                 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
1463                             f->dissector->used_keys);
1464                 return -EOPNOTSUPP;
1465         }
1466
1467         if ((dissector_uses_key(f->dissector,
1468                                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
1469              dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
1470              dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
1471             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
1472                 struct flow_dissector_key_control *key =
1473                         skb_flow_dissector_target(f->dissector,
1474                                                   FLOW_DISSECTOR_KEY_ENC_CONTROL,
1475                                                   f->key);
1476                 switch (key->addr_type) {
1477                 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1478                 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1479                         if (parse_tunnel_attr(priv, spec, f, filter_dev))
1480                                 return -EOPNOTSUPP;
1481                         break;
1482                 default:
1483                         return -EOPNOTSUPP;
1484                 }
1485
1486                 /* In decap flows, header pointers should point to the inner
1487                  * headers; the outer headers were already set by parse_tunnel_attr()
1488                  */
1489                 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1490                                          inner_headers);
1491                 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1492                                          inner_headers);
1493         }
1494
1495         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1496                 struct flow_dissector_key_basic *key =
1497                         skb_flow_dissector_target(f->dissector,
1498                                                   FLOW_DISSECTOR_KEY_BASIC,
1499                                                   f->key);
1500                 struct flow_dissector_key_basic *mask =
1501                         skb_flow_dissector_target(f->dissector,
1502                                                   FLOW_DISSECTOR_KEY_BASIC,
1503                                                   f->mask);
1504                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1505                          ntohs(mask->n_proto));
1506                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1507                          ntohs(key->n_proto));
1508
1509                 if (mask->n_proto)
1510                         *match_level = MLX5_MATCH_L2;
1511         }
1512
1513         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
1514                 struct flow_dissector_key_vlan *key =
1515                         skb_flow_dissector_target(f->dissector,
1516                                                   FLOW_DISSECTOR_KEY_VLAN,
1517                                                   f->key);
1518                 struct flow_dissector_key_vlan *mask =
1519                         skb_flow_dissector_target(f->dissector,
1520                                                   FLOW_DISSECTOR_KEY_VLAN,
1521                                                   f->mask);
1522                 if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1523                         if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1524                                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1525                                          svlan_tag, 1);
1526                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1527                                          svlan_tag, 1);
1528                         } else {
1529                                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1530                                          cvlan_tag, 1);
1531                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1532                                          cvlan_tag, 1);
1533                         }
1534
1535                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
1536                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
1537
1538                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
1539                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
1540
1541                         *match_level = MLX5_MATCH_L2;
1542                 }
1543         } else if (*match_level != MLX5_MATCH_NONE) {
1544                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1545                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1546                 *match_level = MLX5_MATCH_L2;
1547         }
1548
1549         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
1550                 struct flow_dissector_key_vlan *key =
1551                         skb_flow_dissector_target(f->dissector,
1552                                                   FLOW_DISSECTOR_KEY_CVLAN,
1553                                                   f->key);
1554                 struct flow_dissector_key_vlan *mask =
1555                         skb_flow_dissector_target(f->dissector,
1556                                                   FLOW_DISSECTOR_KEY_CVLAN,
1557                                                   f->mask);
1558                 if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
1559                         if (key->vlan_tpid == htons(ETH_P_8021AD)) {
1560                                 MLX5_SET(fte_match_set_misc, misc_c,
1561                                          outer_second_svlan_tag, 1);
1562                                 MLX5_SET(fte_match_set_misc, misc_v,
1563                                          outer_second_svlan_tag, 1);
1564                         } else {
1565                                 MLX5_SET(fte_match_set_misc, misc_c,
1566                                          outer_second_cvlan_tag, 1);
1567                                 MLX5_SET(fte_match_set_misc, misc_v,
1568                                          outer_second_cvlan_tag, 1);
1569                         }
1570
1571                         MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
1572                                  mask->vlan_id);
1573                         MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
1574                                  key->vlan_id);
1575                         MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
1576                                  mask->vlan_priority);
1577                         MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
1578                                  key->vlan_priority);
1579
1580                         *match_level = MLX5_MATCH_L2;
1581                 }
1582         }
1583
1584         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1585                 struct flow_dissector_key_eth_addrs *key =
1586                         skb_flow_dissector_target(f->dissector,
1587                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
1588                                                   f->key);
1589                 struct flow_dissector_key_eth_addrs *mask =
1590                         skb_flow_dissector_target(f->dissector,
1591                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
1592                                                   f->mask);
1593
1594                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1595                                              dmac_47_16),
1596                                 mask->dst);
1597                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1598                                              dmac_47_16),
1599                                 key->dst);
1600
1601                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1602                                              smac_47_16),
1603                                 mask->src);
1604                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1605                                              smac_47_16),
1606                                 key->src);
1607
1608                 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1609                         *match_level = MLX5_MATCH_L2;
1610         }
1611
1612         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
1613                 struct flow_dissector_key_control *key =
1614                         skb_flow_dissector_target(f->dissector,
1615                                                   FLOW_DISSECTOR_KEY_CONTROL,
1616                                                   f->key);
1617
1618                 struct flow_dissector_key_control *mask =
1619                         skb_flow_dissector_target(f->dissector,
1620                                                   FLOW_DISSECTOR_KEY_CONTROL,
1621                                                   f->mask);
1622                 addr_type = key->addr_type;
1623
1624                 /* the HW doesn't support matching on frag first/later */
1625                 if (mask->flags & FLOW_DIS_FIRST_FRAG)
1626                         return -EOPNOTSUPP;
1627
1628                 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
1629                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1630                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
1631                                  key->flags & FLOW_DIS_IS_FRAGMENT);
1632
1633                         /* the HW doesn't need L3 inline to match on frag=no */
1634                         if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
1635                                 *match_level = MLX5_MATCH_L2;
1636                         else
1637                                 *match_level = MLX5_MATCH_L3;
1638                 }
1639         }
1640         /* ***  L2 attributes parsing up to here *** */
1641
1642         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1643                 struct flow_dissector_key_basic *key =
1644                         skb_flow_dissector_target(f->dissector,
1645                                                   FLOW_DISSECTOR_KEY_BASIC,
1646                                                   f->key);
1647                 struct flow_dissector_key_basic *mask =
1648                         skb_flow_dissector_target(f->dissector,
1649                                                   FLOW_DISSECTOR_KEY_BASIC,
1650                                                   f->mask);
1651                 ip_proto = key->ip_proto;
1652
1653                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
1654                          mask->ip_proto);
1655                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1656                          key->ip_proto);
1657
1658                 if (mask->ip_proto)
1659                         *match_level = MLX5_MATCH_L3;
1660         }
1661
1662         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1663                 struct flow_dissector_key_ipv4_addrs *key =
1664                         skb_flow_dissector_target(f->dissector,
1665                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1666                                                   f->key);
1667                 struct flow_dissector_key_ipv4_addrs *mask =
1668                         skb_flow_dissector_target(f->dissector,
1669                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1670                                                   f->mask);
1671
1672                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1673                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
1674                        &mask->src, sizeof(mask->src));
1675                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1676                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
1677                        &key->src, sizeof(key->src));
1678                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1679                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1680                        &mask->dst, sizeof(mask->dst));
1681                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1682                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1683                        &key->dst, sizeof(key->dst));
1684
1685                 if (mask->src || mask->dst)
1686                         *match_level = MLX5_MATCH_L3;
1687         }
1688
1689         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1690                 struct flow_dissector_key_ipv6_addrs *key =
1691                         skb_flow_dissector_target(f->dissector,
1692                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1693                                                   f->key);
1694                 struct flow_dissector_key_ipv6_addrs *mask =
1695                         skb_flow_dissector_target(f->dissector,
1696                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1697                                                   f->mask);
1698
1699                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1700                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1701                        &mask->src, sizeof(mask->src));
1702                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1703                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1704                        &key->src, sizeof(key->src));
1705
1706                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1707                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1708                        &mask->dst, sizeof(mask->dst));
1709                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1710                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1711                        &key->dst, sizeof(key->dst));
1712
1713                 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
1714                     ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
1715                         *match_level = MLX5_MATCH_L3;
1716         }
1717
1718         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
1719                 struct flow_dissector_key_ip *key =
1720                         skb_flow_dissector_target(f->dissector,
1721                                                   FLOW_DISSECTOR_KEY_IP,
1722                                                   f->key);
1723                 struct flow_dissector_key_ip *mask =
1724                         skb_flow_dissector_target(f->dissector,
1725                                                   FLOW_DISSECTOR_KEY_IP,
1726                                                   f->mask);
1727
1728                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1729                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1730
1731                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1732                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos  >> 2);
1733
1734                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1735                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1736
1737                 if (mask->ttl &&
1738                     !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
1739                                                 ft_field_support.outer_ipv4_ttl)) {
1740                         NL_SET_ERR_MSG_MOD(extack,
1741                                            "Matching on TTL is not supported");
1742                         return -EOPNOTSUPP;
1743                 }
1744
1745                 if (mask->tos || mask->ttl)
1746                         *match_level = MLX5_MATCH_L3;
1747         }
1748
1749         /* ***  L3 attributes parsing up to here *** */
1750
1751         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
1752                 struct flow_dissector_key_ports *key =
1753                         skb_flow_dissector_target(f->dissector,
1754                                                   FLOW_DISSECTOR_KEY_PORTS,
1755                                                   f->key);
1756                 struct flow_dissector_key_ports *mask =
1757                         skb_flow_dissector_target(f->dissector,
1758                                                   FLOW_DISSECTOR_KEY_PORTS,
1759                                                   f->mask);
1760                 switch (ip_proto) {
1761                 case IPPROTO_TCP:
1762                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1763                                  tcp_sport, ntohs(mask->src));
1764                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1765                                  tcp_sport, ntohs(key->src));
1766
1767                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1768                                  tcp_dport, ntohs(mask->dst));
1769                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1770                                  tcp_dport, ntohs(key->dst));
1771                         break;
1772
1773                 case IPPROTO_UDP:
1774                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1775                                  udp_sport, ntohs(mask->src));
1776                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1777                                  udp_sport, ntohs(key->src));
1778
1779                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1780                                  udp_dport, ntohs(mask->dst));
1781                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1782                                  udp_dport, ntohs(key->dst));
1783                         break;
1784                 default:
1785                         NL_SET_ERR_MSG_MOD(extack,
1786                                            "Only UDP and TCP transports are supported for L4 matching");
1787                         netdev_err(priv->netdev,
1788                                    "Only UDP and TCP transports are supported\n");
1789                         return -EINVAL;
1790                 }
1791
1792                 if (mask->src || mask->dst)
1793                         *match_level = MLX5_MATCH_L4;
1794         }
1795
1796         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1797                 struct flow_dissector_key_tcp *key =
1798                         skb_flow_dissector_target(f->dissector,
1799                                                   FLOW_DISSECTOR_KEY_TCP,
1800                                                   f->key);
1801                 struct flow_dissector_key_tcp *mask =
1802                         skb_flow_dissector_target(f->dissector,
1803                                                   FLOW_DISSECTOR_KEY_TCP,
1804                                                   f->mask);
1805
1806                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1807                          ntohs(mask->flags));
1808                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1809                          ntohs(key->flags));
1810
1811                 if (mask->flags)
1812                         *match_level = MLX5_MATCH_L4;
1813         }
1814
1815         return 0;
1816 }
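
/* Sketch of how *match_level escalates for a typical filter (device and
 * addresses made up); the deepest matched header wins:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       dst_mac 00:11:22:33:44:55 \   <- needs MLX5_MATCH_L2
 *       ip_proto tcp \                <- needs MLX5_MATCH_L3
 *       dst_port 80 \                 <- needs MLX5_MATCH_L4
 *       action drop
 *
 * so __parse_cls_flower() returns with *match_level == MLX5_MATCH_L4.
 */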
1817
1818 static int parse_cls_flower(struct mlx5e_priv *priv,
1819                             struct mlx5e_tc_flow *flow,
1820                             struct mlx5_flow_spec *spec,
1821                             struct tc_cls_flower_offload *f,
1822                             struct net_device *filter_dev)
1823 {
1824         struct netlink_ext_ack *extack = f->common.extack;
1825         struct mlx5_core_dev *dev = priv->mdev;
1826         struct mlx5_eswitch *esw = dev->priv.eswitch;
1827         struct mlx5e_rep_priv *rpriv = priv->ppriv;
1828         struct mlx5_eswitch_rep *rep;
1829         u8 match_level;
1830         int err;
1831
1832         err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
1833
1834         if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1835                 rep = rpriv->rep;
1836                 if (rep->vport != FDB_UPLINK_VPORT &&
1837                     (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1838                     esw->offloads.inline_mode < match_level)) {
1839                         NL_SET_ERR_MSG_MOD(extack,
1840                                            "Flow is not offloaded due to min inline setting");
1841                         netdev_warn(priv->netdev,
1842                                     "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1843                                     match_level, esw->offloads.inline_mode);
1844                         return -EOPNOTSUPP;
1845                 }
1846         }
1847
1848         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1849                 flow->esw_attr->match_level = match_level;
1850         else
1851                 flow->nic_attr->match_level = match_level;
1852
1853         return err;
1854 }
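
/* Example of the min-inline check above (devlink mode names from memory):
 * with the eswitch inline mode set to "link" (MLX5_INLINE_MODE_L2), a
 * filter on a VF representor whose match needs MLX5_MATCH_L4 is rejected,
 * since L2 < L4; the same filter on the uplink representor is accepted.
 */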
1855
1856 struct pedit_headers {
1857         struct ethhdr  eth;
1858         struct iphdr   ip4;
1859         struct ipv6hdr ip6;
1860         struct tcphdr  tcp;
1861         struct udphdr  udp;
1862 };
1863
1864 static int pedit_header_offsets[] = {
1865         [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1866         [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1867         [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1868         [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1869         [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1870 };
1871
1872 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
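
/* e.g. pedit_header(masks, TCA_PEDIT_KEY_EX_HDR_TYPE_IP4) evaluates to
 * (void *)masks + offsetof(struct pedit_headers, ip4), a pointer to the
 * shadow IPv4 header inside the helper struct.
 */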
1873
1874 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1875                          struct pedit_headers *masks,
1876                          struct pedit_headers *vals)
1877 {
1878         u32 *curr_pmask, *curr_pval;
1879
1880         if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1881                 goto out_err;
1882
1883         curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1884         curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);
1885
1886         if (*curr_pmask & mask)  /* disallow acting twice on the same location */
1887                 goto out_err;
1888
1889         *curr_pmask |= mask;
1890         *curr_pval  |= (val & mask);
1891
1892         return 0;
1893
1894 out_err:
1895         return -EOPNOTSUPP;
1896 }
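
/* Illustration: "pedit ex munge ip ttl set 64" arrives as one 32-bit SW
 * key; set_pedit_val() ORs the rewritten bits into masks->ip4 and the
 * value into vals->ip4 at the key's offset, and fails a second key that
 * touches any of the same bits.
 */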
1897
1898 struct mlx5_fields {
1899         u8  field;
1900         u8  size;
1901         u32 offset;
1902 };
1903
1904 #define OFFLOAD(fw_field, size, field, off) \
1905                 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1906
1907 static struct mlx5_fields fields[] = {
1908         OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1909         OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
1910         OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1911         OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
1912         OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
1913
1914         OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
1915         OFFLOAD(SIPV4,  4, ip4.saddr, 0),
1916         OFFLOAD(DIPV4,  4, ip4.daddr, 0),
1917
1918         OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1919         OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
1920         OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
1921         OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
1922         OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1923         OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
1924         OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
1925         OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
1926         OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
1927
1928         OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
1929         OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
1930         OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1931
1932         OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1933         OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
1934 };
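
/* For instance, OFFLOAD(IP_TTL, 1, ip4.ttl, 0) expands to
 * { MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl) },
 * tying the FW field id to the shadow-struct byte filled by set_pedit_val().
 * The extra offset is for fields without a struct member of their own, e.g.
 * TCP_FLAGS lives at tcp.ack_seq + 5, byte 13 of the TCP header.
 */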
1935
1936 /* On input, parse_attr->num_mod_hdr_actions holds the maximum number of HW
1937  * actions that can be generated from the SW pedit keys. On success, it is
1938  * updated to the number of HW actions actually parsed.
1939  */
1940 static int offload_pedit_fields(struct pedit_headers *masks,
1941                                 struct pedit_headers *vals,
1942                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
1943                                 struct netlink_ext_ack *extack)
1944 {
1945         struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
1946         int i, action_size, nactions, max_actions, first, last, next_z;
1947         void *s_masks_p, *a_masks_p, *vals_p;
1948         struct mlx5_fields *f;
1949         u8 cmd, field_bsize;
1950         u32 s_mask, a_mask;
1951         unsigned long mask;
1952         __be32 mask_be32;
1953         __be16 mask_be16;
1954         void *action;
1955
1956         set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1957         add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1958         set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1959         add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1960
1961         action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1962         action = parse_attr->mod_hdr_actions;
1963         max_actions = parse_attr->num_mod_hdr_actions;
1964         nactions = 0;
1965
1966         for (i = 0; i < ARRAY_SIZE(fields); i++) {
1967                 f = &fields[i];
1968                 /* avoid seeing bits set from previous iterations */
1969                 s_mask = 0;
1970                 a_mask = 0;
1971
1972                 s_masks_p = (void *)set_masks + f->offset;
1973                 a_masks_p = (void *)add_masks + f->offset;
1974
1975                 memcpy(&s_mask, s_masks_p, f->size);
1976                 memcpy(&a_mask, a_masks_p, f->size);
1977
1978                 if (!s_mask && !a_mask) /* nothing to offload here */
1979                         continue;
1980
1981                 if (s_mask && a_mask) {
1982                         NL_SET_ERR_MSG_MOD(extack,
1983                                            "can't set and add to the same HW field");
1984                         printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1985                         return -EOPNOTSUPP;
1986                 }
1987
1988                 if (nactions == max_actions) {
1989                         NL_SET_ERR_MSG_MOD(extack,
1990                                            "too many pedit actions, can't offload");
1991                         printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1992                         return -EOPNOTSUPP;
1993                 }
1994
1995                 if (s_mask) {
1996                         cmd  = MLX5_ACTION_TYPE_SET;
1997                         mask = s_mask;
1998                         vals_p = (void *)set_vals + f->offset;
1999                         /* clear to denote we consumed this field */
2000                         memset(s_masks_p, 0, f->size);
2001                 } else {
2002                         cmd  = MLX5_ACTION_TYPE_ADD;
2003                         mask = a_mask;
2004                         vals_p = (void *)add_vals + f->offset;
2005                         /* clear to denote we consumed this field */
2006                         memset(a_masks_p, 0, f->size);
2007                 }
2008
2009                 field_bsize = f->size * BITS_PER_BYTE;
2010
2011                 if (field_bsize == 32) {
2012                         mask_be32 = *(__be32 *)&mask;
2013                         mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2014                 } else if (field_bsize == 16) {
2015                         mask_be16 = *(__be16 *)&mask;
2016                         mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2017                 }
2018
2019                 first = find_first_bit(&mask, field_bsize);
2020                 next_z = find_next_zero_bit(&mask, field_bsize, first);
2021                 last  = find_last_bit(&mask, field_bsize);
2022                 if (first < next_z && next_z < last) {
2023                         NL_SET_ERR_MSG_MOD(extack,
2024                                            "rewrite of non-contiguous sub-fields isn't supported");
2025                         printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
2026                                mask);
2027                         return -EOPNOTSUPP;
2028                 }
2029
2030                 MLX5_SET(set_action_in, action, action_type, cmd);
2031                 MLX5_SET(set_action_in, action, field, f->field);
2032
2033                 if (cmd == MLX5_ACTION_TYPE_SET) {
2034                         MLX5_SET(set_action_in, action, offset, first);
2035                         /* length is num of bits to be written, zero means length of 32 */
2036                         MLX5_SET(set_action_in, action, length, (last - first + 1));
2037                 }
2038
2039                 if (field_bsize == 32)
2040                         MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2041                 else if (field_bsize == 16)
2042                         MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2043                 else if (field_bsize == 8)
2044                         MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2045
2046                 action += action_size;
2047                 nactions++;
2048         }
2049
2050         parse_attr->num_mod_hdr_actions = nactions;
2051         return 0;
2052 }
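
/* Worked example, assuming a single 16-bit SET on eth.h_proto with
 * (host order) mask 0x00f0: first = 4, next_z = 8, last = 7, the span is
 * contiguous, and the emitted HW action is SET(field = ETHERTYPE,
 * offset = 4, length = 4, data = ntohs(val) >> 4). A holed mask such as
 * 0x0909 gives first = 0, next_z = 1, last = 11 and is rejected by the
 * first < next_z < last test above.
 */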
2053
2054 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2055                                  const struct tc_action *a, int namespace,
2056                                  struct mlx5e_tc_flow_parse_attr *parse_attr)
2057 {
2058         int nkeys, action_size, max_actions;
2059
2060         nkeys = tcf_pedit_nkeys(a);
2061         action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
2062
2063         if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2064                 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
2065         else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2066                 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
2067
2068         /* a single 32-bit pedit SW key can translate into as many as 16 HW actions */
2069         max_actions = min(max_actions, nkeys * 16);
2070
2071         parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
2072         if (!parse_attr->mod_hdr_actions)
2073                 return -ENOMEM;
2074
2075         parse_attr->num_mod_hdr_actions = max_actions;
2076         return 0;
2077 }
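
/* Sizing sketch: for "pedit ex munge ip src set 1.1.1.1 munge ip dst set
 * 2.2.2.2" nkeys is 2, so the array is sized min(HW max_modify_header_actions,
 * 2 * 16) entries of action_size bytes; offload_pedit_fields() later trims
 * num_mod_hdr_actions down to what was actually emitted.
 */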
2078
2079 static const struct pedit_headers zero_masks = {};
2080
2081 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2082                                  const struct tc_action *a, int namespace,
2083                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
2084                                  struct netlink_ext_ack *extack)
2085 {
2086         struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
2087         int nkeys, i, err = -EOPNOTSUPP;
2088         u32 mask, val, offset;
2089         u8 cmd, htype;
2090
2091         nkeys = tcf_pedit_nkeys(a);
2092
2093         memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
2094         memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
2095
2096         for (i = 0; i < nkeys; i++) {
2097                 htype = tcf_pedit_htype(a, i);
2098                 cmd = tcf_pedit_cmd(a, i);
2099                 err = -EOPNOTSUPP; /* can't be all optimistic */
2100
2101                 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
2102                         NL_SET_ERR_MSG_MOD(extack,
2103                                            "legacy pedit isn't offloaded");
2104                         goto out_err;
2105                 }
2106
2107                 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
2108                         NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
2109                         goto out_err;
2110                 }
2111
2112                 mask = tcf_pedit_mask(a, i);
2113                 val = tcf_pedit_val(a, i);
2114                 offset = tcf_pedit_offset(a, i);
2115
2116                 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
2117                 if (err)
2118                         goto out_err;
2119         }
2120
2121         err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
2122         if (err)
2123                 goto out_err;
2124
2125         err = offload_pedit_fields(masks, vals, parse_attr, extack);
2126         if (err < 0)
2127                 goto out_dealloc_parsed_actions;
2128
2129         for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2130                 cmd_masks = &masks[cmd];
2131                 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2132                         NL_SET_ERR_MSG_MOD(extack,
2133                                            "attempt to offload an unsupported field");
2134                         netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2135                         print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2136                                        16, 1, cmd_masks, sizeof(zero_masks), true);
2137                         err = -EOPNOTSUPP;
2138                         goto out_dealloc_parsed_actions;
2139                 }
2140         }
2141
2142         return 0;
2143
2144 out_dealloc_parsed_actions:
2145         kfree(parse_attr->mod_hdr_actions);
2146 out_err:
2147         return err;
2148 }
2149
2150 static bool csum_offload_supported(struct mlx5e_priv *priv,
2151                                    u32 action,
2152                                    u32 update_flags,
2153                                    struct netlink_ext_ack *extack)
2154 {
2155         u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2156                          TCA_CSUM_UPDATE_FLAG_UDP;
2157
2158         /* The HW recalculates checksums only when re-writing headers */
2159         if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2160                 NL_SET_ERR_MSG_MOD(extack,
2161                                    "TC csum action is only offloaded with pedit");
2162                 netdev_warn(priv->netdev,
2163                             "TC csum action is only offloaded with pedit\n");
2164                 return false;
2165         }
2166
2167         if (update_flags & ~prot_flags) {
2168                 NL_SET_ERR_MSG_MOD(extack,
2169                                    "can't offload TC csum action for some of the headers");
2170                 netdev_warn(priv->netdev,
2171                             "can't offload TC csum action for some of the headers - flags %#x\n",
2172                             update_flags);
2173                 return false;
2174         }
2175
2176         return true;
2177 }
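
/* Illustrative rule pair (csum keyword syntax approximate): something like
 * "action pedit ex munge ip ttl set 63 pipe csum ip4h" is accepted because
 * the pedit contributes MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, while a bare
 * "action csum ip4h" with no header rewrite is refused above.
 */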
2178
2179 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2180                                           struct tcf_exts *exts,
2181                                           struct netlink_ext_ack *extack)
2182 {
2183         const struct tc_action *a;
2184         bool modify_ip_header;
2185         LIST_HEAD(actions);
2186         u8 htype, ip_proto;
2187         void *headers_v;
2188         u16 ethertype;
2189         int nkeys, i;
2190
2191         headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2192         ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2193
2194         /* for non-IP we only re-write MACs, so we're okay */
2195         if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
2196                 goto out_ok;
2197
2198         modify_ip_header = false;
2199         tcf_exts_for_each_action(i, a, exts) {
2200                 int k;
2201
2202                 if (!is_tcf_pedit(a))
2203                         continue;
2204
2205                 nkeys = tcf_pedit_nkeys(a);
2206                 for (k = 0; k < nkeys; k++) {
2207                         htype = tcf_pedit_htype(a, k);
2208                         if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
2209                             htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
2210                                 modify_ip_header = true;
2211                                 break;
2212                         }
2213                 }
2214         }
2215
2216         ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
2217         if (modify_ip_header && ip_proto != IPPROTO_TCP &&
2218             ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
2219                 NL_SET_ERR_MSG_MOD(extack,
2220                                    "can't offload re-write of non TCP/UDP/ICMP");
2221                 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
2222                 return false;
2223         }
2224
2225 out_ok:
2226         return true;
2227 }
2228
2229 static bool actions_match_supported(struct mlx5e_priv *priv,
2230                                     struct tcf_exts *exts,
2231                                     struct mlx5e_tc_flow_parse_attr *parse_attr,
2232                                     struct mlx5e_tc_flow *flow,
2233                                     struct netlink_ext_ack *extack)
2234 {
2235         u32 actions;
2236
2237         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
2238                 actions = flow->esw_attr->action;
2239         else
2240                 actions = flow->nic_attr->action;
2241
2242         if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
2243             !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
2244                 return false;
2245
2246         if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2247                 return modify_header_match_supported(&parse_attr->spec, exts,
2248                                                      extack);
2249
2250         return true;
2251 }
2252
2253 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
2254 {
2255         struct mlx5_core_dev *fmdev, *pmdev;
2256         u64 fsystem_guid, psystem_guid;
2257
2258         fmdev = priv->mdev;
2259         pmdev = peer_priv->mdev;
2260
2261         fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
2262         psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
2263
2264         return (fsystem_guid == psystem_guid);
2265 }
2266
2267 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2268                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
2269                                 struct mlx5e_tc_flow *flow,
2270                                 struct netlink_ext_ack *extack)
2271 {
2272         struct mlx5_nic_flow_attr *attr = flow->nic_attr;
2273         const struct tc_action *a;
2274         LIST_HEAD(actions);
2275         u32 action = 0;
2276         int err, i;
2277
2278         if (!tcf_exts_has_actions(exts))
2279                 return -EINVAL;
2280
2281         attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2282
2283         tcf_exts_for_each_action(i, a, exts) {
2284                 if (is_tcf_gact_shot(a)) {
2285                         action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2286                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
2287                                                flow_table_properties_nic_receive.flow_counter))
2288                                 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2289                         continue;
2290                 }
2291
2292                 if (is_tcf_pedit(a)) {
2293                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
2294                                                     parse_attr, extack);
2295                         if (err)
2296                                 return err;
2297
2298                         action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
2299                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2300                         continue;
2301                 }
2302
2303                 if (is_tcf_csum(a)) {
2304                         if (csum_offload_supported(priv, action,
2305                                                    tcf_csum_update_flags(a),
2306                                                    extack))
2307                                 continue;
2308
2309                         return -EOPNOTSUPP;
2310                 }
2311
2312                 if (is_tcf_mirred_egress_redirect(a)) {
2313                         struct net_device *peer_dev = tcf_mirred_dev(a);
2314
2315                         if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
2316                             same_hw_devs(priv, netdev_priv(peer_dev))) {
2317                                 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
2318                                 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
2319                                 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2320                                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
2321                         } else {
2322                                 NL_SET_ERR_MSG_MOD(extack,
2323                                                    "device is not on same HW, can't offload");
2324                                 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2325                                             peer_dev->name);
2326                                 return -EINVAL;
2327                         }
2328                         continue;
2329                 }
2330
2331                 if (is_tcf_skbedit_mark(a)) {
2332                         u32 mark = tcf_skbedit_mark(a);
2333
2334                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
2335                                 NL_SET_ERR_MSG_MOD(extack,
2336                                                    "Bad flow mark - only 16 bits are supported");
2337                                 return -EINVAL;
2338                         }
2339
2340                         attr->flow_tag = mark;
2341                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2342                         continue;
2343                 }
2344
2345                 return -EINVAL;
2346         }
2347
2348         attr->action = action;
2349         if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2350                 return -EOPNOTSUPP;
2351
2352         return 0;
2353 }
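
/* Example NIC rule (interface name made up) handled by this parser:
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto udp \
 *       action skbedit mark 0x1234
 *
 * The mark becomes attr->flow_tag; values wider than 16 bits
 * (MLX5E_TC_FLOW_ID_MASK) are rejected above.
 */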
2354
2355 static inline int cmp_encap_info(struct ip_tunnel_key *a,
2356                                  struct ip_tunnel_key *b)
2357 {
2358         return memcmp(a, b, sizeof(*a));
2359 }
2360
2361 static inline int hash_encap_info(struct ip_tunnel_key *key)
2362 {
2363         return jhash(key, sizeof(*key), 0);
2364 }
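
/* Encap entries are deduplicated on the full ip_tunnel_key: two flows
 * encapsulating to the same id/addresses/dst_port/tos/ttl share a single
 * entry (and HW encap context); any difference yields a new entry.
 */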
2365
2367 static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2368                                   struct net_device *peer_netdev)
2369 {
2370         struct mlx5e_priv *peer_priv;
2371
2372         peer_priv = netdev_priv(peer_netdev);
2373
2374         return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2375                 (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
2376                 same_hw_devs(priv, peer_priv) &&
2377                 MLX5_VPORT_MANAGER(peer_priv->mdev) &&
2378                 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2379 }
2380
2383 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2384                               struct ip_tunnel_info *tun_info,
2385                               struct net_device *mirred_dev,
2386                               struct net_device **encap_dev,
2387                               struct mlx5e_tc_flow *flow,
2388                               struct netlink_ext_ack *extack,
2389                               int out_index)
2390 {
2391         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2392         unsigned short family = ip_tunnel_info_af(tun_info);
2393         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2394         struct ip_tunnel_key *key = &tun_info->key;
2395         struct mlx5e_encap_entry *e;
2396         uintptr_t hash_key;
2397         bool found = false;
2398         int err = 0;
2399
2400         hash_key = hash_encap_info(key);
2401
2402         hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2403                                    encap_hlist, hash_key) {
2404                 if (!cmp_encap_info(&e->tun_info.key, key)) {
2405                         found = true;
2406                         break;
2407                 }
2408         }
2409
2410         /* must verify whether the encap entry is valid or not */
2411         if (found)
2412                 goto attach_flow;
2413
2414         e = kzalloc(sizeof(*e), GFP_KERNEL);
2415         if (!e)
2416                 return -ENOMEM;
2417
2418         e->tun_info = *tun_info;
2419         err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
2420         if (err)
2421                 goto out_err;
2422
2423         INIT_LIST_HEAD(&e->flows);
2424
2425         if (family == AF_INET)
2426                 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
2427         else if (family == AF_INET6)
2428                 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
2429
2430         if (err && err != -EAGAIN)
2431                 goto out_err;
2432
2433         hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2434
2435 attach_flow:
2436         list_add(&flow->encaps[out_index].list, &e->flows);
2437         flow->encaps[out_index].index = out_index;
2438         *encap_dev = e->out_dev;
2439         if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
2440                 attr->dests[out_index].encap_id = e->encap_id;
2441                 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
2442         } else {
2443                 err = -EAGAIN;
2444         }
2445
2446         return err;
2447
2448 out_err:
2449         kfree(e);
2450         return err;
2451 }
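
/* Note: -EAGAIN from here is not fatal. It means the tunnel headers are
 * not resolved yet (e.g. a pending neighbour), so the caller is expected
 * to install the flow via the slow path until the encap entry becomes
 * MLX5_ENCAP_ENTRY_VALID.
 */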
2452
2453 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
2454                                 const struct tc_action *a,
2455                                 struct mlx5_esw_flow_attr *attr,
2456                                 u32 *action)
2457 {
2458         u8 vlan_idx = attr->total_vlan;
2459
2460         if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
2461                 return -EOPNOTSUPP;
2462
2463         if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2464                 if (vlan_idx) {
2465                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2466                                                                  MLX5_FS_VLAN_DEPTH))
2467                                 return -EOPNOTSUPP;
2468
2469                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
2470                 } else {
2471                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2472                 }
2473         } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2474                 attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
2475                 attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
2476                 attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
2477                 if (!attr->vlan_proto[vlan_idx])
2478                         attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
2479
2480                 if (vlan_idx) {
2481                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
2482                                                                  MLX5_FS_VLAN_DEPTH))
2483                                 return -EOPNOTSUPP;
2484
2485                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
2486                 } else {
2487                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
2488                             (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2489                              tcf_vlan_push_prio(a)))
2490                                 return -EOPNOTSUPP;
2491
2492                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
2493                 }
2494         } else { /* action is TCA_VLAN_ACT_MODIFY */
2495                 return -EOPNOTSUPP;
2496         }
2497
2498         attr->total_vlan = vlan_idx + 1;
2499
2500         return 0;
2501 }
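
/* Illustrative mappings, assuming MLX5_FS_VLAN_DEPTH == 2:
 *   "action vlan pop"                          -> VLAN_POP
 *   two pops in one rule                       -> VLAN_POP + VLAN_POP_2
 *                                                 (needs HW depth-2 support)
 *   "action vlan push id 100 protocol 802.1q"  -> VLAN_PUSH
 * TCA_VLAN_ACT_MODIFY is not offloaded here.
 */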
2502
2503 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2504                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
2505                                 struct mlx5e_tc_flow *flow,
2506                                 struct netlink_ext_ack *extack)
2507 {
2508         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2509         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2510         struct mlx5e_rep_priv *rpriv = priv->ppriv;
2511         struct ip_tunnel_info *info = NULL;
2512         const struct tc_action *a;
2513         LIST_HEAD(actions);
2514         bool encap = false;
2515         u32 action = 0;
2516         int err, i;
2517
2518         if (!tcf_exts_has_actions(exts))
2519                 return -EINVAL;
2520
2521         attr->in_rep = rpriv->rep;
2522         attr->in_mdev = priv->mdev;
2523
2524         tcf_exts_for_each_action(i, a, exts) {
2525                 if (is_tcf_gact_shot(a)) {
2526                         action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2527                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2528                         continue;
2529                 }
2530
2531                 if (is_tcf_pedit(a)) {
2532                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
2533                                                     parse_attr, extack);
2534                         if (err)
2535                                 return err;
2536
2537                         action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2538                         attr->split_count = attr->out_count;
2539                         continue;
2540                 }
2541
2542                 if (is_tcf_csum(a)) {
2543                         if (csum_offload_supported(priv, action,
2544                                                    tcf_csum_update_flags(a),
2545                                                    extack))
2546                                 continue;
2547
2548                         return -EOPNOTSUPP;
2549                 }
2550
2551                 if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
2552                         struct mlx5e_priv *out_priv;
2553                         struct net_device *out_dev;
2554
2555                         out_dev = tcf_mirred_dev(a);
2556                         if (!out_dev) {
2557                                 /* out_dev is NULL when filters with
2558                                  * non-existing mirred device are replayed to
2559                                  * the driver.
2560                                  */
2561                                 return -EINVAL;
2562                         }
2563
2564                         if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
2565                                 NL_SET_ERR_MSG_MOD(extack,
2566                                                    "can't support more output ports, can't offload forwarding");
2567                                 pr_err("can't support more than %d output ports, can't offload forwarding\n",
2568                                        attr->out_count);
2569                                 return -EOPNOTSUPP;
2570                         }
2571
2572                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2573                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2574                         if (switchdev_port_same_parent_id(priv->netdev,
2575                                                           out_dev) ||
2576                             is_merged_eswitch_dev(priv, out_dev)) {
2577                                 out_priv = netdev_priv(out_dev);
2578                                 rpriv = out_priv->ppriv;
2579                                 attr->dests[attr->out_count].rep = rpriv->rep;
2580                                 attr->dests[attr->out_count].mdev = out_priv->mdev;
2581                                 attr->out_count++;
2582                         } else if (encap) {
2583                                 parse_attr->mirred_ifindex[attr->out_count] =
2584                                         out_dev->ifindex;
2585                                 parse_attr->tun_info[attr->out_count] = *info;
2586                                 encap = false;
2587                                 attr->parse_attr = parse_attr;
2588                                 attr->dests[attr->out_count].flags |=
2589                                         MLX5_ESW_DEST_ENCAP;
2590                                 attr->out_count++;
2591                                 /* attr->dests[].rep is resolved when we
2592                                  * handle encap
2593                                  */
2594                         } else if (parse_attr->filter_dev != priv->netdev) {
2595                                 /* All mlx5 devices are called to configure
2596                                  * high-level device filters; an attempt to
2597                                  * install a filter on an invalid eswitch
2598                                  * should not trigger an explicit error.
2599                                  */
2600                                 return -EINVAL;
2601                         } else {
2602                                 NL_SET_ERR_MSG_MOD(extack,
2603                                                    "devices are not on same switch HW, can't offload forwarding");
2604                                 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2605                                        priv->netdev->name, out_dev->name);
2606                                 return -EINVAL;
2607                         }
2608                         continue;
2609                 }
2610
2611                 if (is_tcf_tunnel_set(a)) {
2612                         info = tcf_tunnel_info(a);
2613                         if (info)
2614                                 encap = true;
2615                         else
2616                                 return -EOPNOTSUPP;
2617                         continue;
2618                 }
2619
2620                 if (is_tcf_vlan(a)) {
2621                         err = parse_tc_vlan_action(priv, a, attr, &action);
2622
2623                         if (err)
2624                                 return err;
2625
2626                         attr->split_count = attr->out_count;
2627                         continue;
2628                 }
2629
2630                 if (is_tcf_tunnel_release(a)) {
2631                         action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2632                         continue;
2633                 }
2634
2635                 if (is_tcf_gact_goto_chain(a)) {
2636                         u32 dest_chain = tcf_gact_goto_chain_index(a);
2637                         u32 max_chain = mlx5_eswitch_get_chain_range(esw);
2638
2639                         if (dest_chain <= attr->chain) {
2640                                 NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
2641                                 return -EOPNOTSUPP;
2642                         }
2643                         if (dest_chain > max_chain) {
2644                                 NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
2645                                 return -EOPNOTSUPP;
2646                         }
2647                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2648                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
2649                         attr->dest_chain = dest_chain;
2650
2651                         continue;
2652                 }
2653
2654                 return -EINVAL;
2655         }
2656
2657         attr->action = action;
2658         if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
2659                 return -EOPNOTSUPP;
2660
2661         if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
2662                 NL_SET_ERR_MSG_MOD(extack,
2663                                    "current firmware doesn't support split rule for port mirroring");
2664                 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
2665                 return -EOPNOTSUPP;
2666         }
2667
2668         return 0;
2669 }
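
/* A note on split_count: the pedit and vlan handlers above snapshot
 * out_count so that destinations added before the packet is modified
 * can be served from a separate (split) rule; firmware support for
 * the extra forwarding FDB table is verified before such a mix is
 * accepted.
 */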
2670
2671 static void get_flags(int flags, u16 *flow_flags)
2672 {
2673         u16 __flow_flags = 0;
2674
2675         if (flags & MLX5E_TC_INGRESS)
2676                 __flow_flags |= MLX5E_TC_FLOW_INGRESS;
2677         if (flags & MLX5E_TC_EGRESS)
2678                 __flow_flags |= MLX5E_TC_FLOW_EGRESS;
2679
2680         *flow_flags = __flow_flags;
2681 }
2682
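/* TC flows are tracked in an rhashtable keyed by the cookie the TC
 * core assigns to each filter; the replace, destroy and stats
 * callbacks below all look flows up by that cookie.
 */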
2683 static const struct rhashtable_params tc_ht_params = {
2684         .head_offset = offsetof(struct mlx5e_tc_flow, node),
2685         .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2686         .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2687         .automatic_shrinking = true,
2688 };
2689
2690 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
2691 {
2692         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2693         struct mlx5e_rep_priv *uplink_rpriv;
2694
2695         if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
2696                 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2697                 return &uplink_rpriv->uplink_priv.tc_ht;
2698         }
2699         return &priv->fs.tc.ht;
2700 }
2701
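/* Infrastructure placeholder: always false for now. Once this
 * predicate identifies flows that must also exist on the peer
 * eswitch, mlx5e_add_fdb_flow() will duplicate them through
 * mlx5e_tc_add_fdb_peer_flow().
 */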
2702 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
2703 {
2704         return false;
2705 }
2706
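/* Allocate a flow object with attr_size trailing bytes for the NIC
 * or eswitch attributes, along with the parse attributes used while
 * the rule is being built.
 */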
2707 static int
2708 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
2709                  struct tc_cls_flower_offload *f, u16 flow_flags,
2710                  struct mlx5e_tc_flow_parse_attr **__parse_attr,
2711                  struct mlx5e_tc_flow **__flow)
2712 {
2713         struct mlx5e_tc_flow_parse_attr *parse_attr;
2714         struct mlx5e_tc_flow *flow;
2715         int err;
2716
2717         flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
2718         parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
2719         if (!parse_attr || !flow) {
2720                 err = -ENOMEM;
2721                 goto err_free;
2722         }
2723
2724         flow->cookie = f->cookie;
2725         flow->flags = flow_flags;
2726         flow->priv = priv;
2727
2728         *__flow = flow;
2729         *__parse_attr = parse_attr;
2730
2731         return 0;
2732
2733 err_free:
2734         kfree(flow);
2735         kvfree(parse_attr);
2736         return err;
2737 }
2738
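/* Common FDB add path. in_rep and in_mdev are passed explicitly so
 * that the same software flow can be instantiated either on the
 * local eswitch or, for duplicated flows, on the peer eswitch.
 */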
2739 static int
2740 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2741                      struct tc_cls_flower_offload *f,
2742                      u16 flow_flags,
2743                      struct net_device *filter_dev,
2744                      struct mlx5_eswitch_rep *in_rep,
2745                      struct mlx5_core_dev *in_mdev,
2746                      struct mlx5e_tc_flow **__flow)
2747 {
2748         struct netlink_ext_ack *extack = f->common.extack;
2749         struct mlx5e_tc_flow_parse_attr *parse_attr;
2750         struct mlx5e_tc_flow *flow;
2751         int attr_size, err;
2752
2753         flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2754         attr_size  = sizeof(struct mlx5_esw_flow_attr);
2755         err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2756                                &parse_attr, &flow);
2757         if (err)
2758                 goto out;
2759         parse_attr->filter_dev = filter_dev;
2760         flow->esw_attr->parse_attr = parse_attr;
2761         err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
2762                                f, filter_dev);
2763         if (err)
2764                 goto err_free;
2765
2766         flow->esw_attr->chain = f->common.chain_index;
2767         flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
2768         err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
2769         if (err)
2770                 goto err_free;
2771
2772         flow->esw_attr->in_rep = in_rep;
2773         flow->esw_attr->in_mdev = in_mdev;
2774         err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
2775         if (err)
2776                 goto err_free;
2777
2778         *__flow = flow;
2779
2780         return 0;
2781
2782 err_free:
2783         kfree(flow);
2784         kvfree(parse_attr);
2785 out:
2786         return err;
2787 }
2788
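/* Offload a duplicate of @flow on the peer eswitch, reusing the
 * original parse attributes. The duplicate is linked via
 * flow->peer_flow and the eswitch peer_flows list, so that it can be
 * torn down later and its counter folded into the original's stats.
 */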
2789 static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
2790                                       struct mlx5e_tc_flow *flow)
2791 {
2792         struct mlx5e_priv *priv = flow->priv, *peer_priv;
2793         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
2794         struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
2795         struct mlx5e_tc_flow_parse_attr *parse_attr;
2796         struct mlx5e_rep_priv *peer_urpriv;
2797         struct mlx5e_tc_flow *peer_flow;
2798         struct mlx5_core_dev *in_mdev;
2799         int err = 0;
2800
2801         peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2802         if (!peer_esw)
2803                 return -ENODEV;
2804
2805         peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
2806         peer_priv = netdev_priv(peer_urpriv->netdev);
2807
2808         /* in_mdev holds the mdev from which the packet originated.
2809          * Packets redirected to the uplink therefore use the same
2810          * mdev as the original flow, while packets redirected from
2811          * the uplink use the peer mdev.
2812          */
2813         if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
2814                 in_mdev = peer_priv->mdev;
2815         else
2816                 in_mdev = priv->mdev;
2817
2818         parse_attr = flow->esw_attr->parse_attr;
2819         err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
2820                                    parse_attr->filter_dev,
2821                                    flow->esw_attr->in_rep, in_mdev, &peer_flow);
2822         if (err)
2823                 goto out;
2824
2825         flow->peer_flow = peer_flow;
2826         flow->flags |= MLX5E_TC_FLOW_DUP;
2827         mutex_lock(&esw->offloads.peer_mutex);
2828         list_add_tail(&flow->peer, &esw->offloads.peer_flows);
2829         mutex_unlock(&esw->offloads.peer_mutex);
2830
2831 out:
2832         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2833         return err;
2834 }
2835
2836 static int
2837 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
2838                    struct tc_cls_flower_offload *f,
2839                    u16 flow_flags,
2840                    struct net_device *filter_dev,
2841                    struct mlx5e_tc_flow **__flow)
2842 {
2843         struct mlx5e_rep_priv *rpriv = priv->ppriv;
2844         struct mlx5_eswitch_rep *in_rep = rpriv->rep;
2845         struct mlx5_core_dev *in_mdev = priv->mdev;
2846         struct mlx5e_tc_flow *flow;
2847         int err;
2848
2849         err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
2850                                    in_mdev, &flow);
2851         if (err)
2852                 goto out;
2853
2854         if (is_peer_flow_needed(flow)) {
2855                 err = mlx5e_tc_add_fdb_peer_flow(f, flow);
2856                 if (err) {
2857                         mlx5e_tc_del_fdb_flow(priv, flow);
2858                         goto out;
2859                 }
2860         }
2861
2862         *__flow = flow;
2863
2864         return 0;
2865
2866 out:
2867         return err;
2868 }
2869
2870 static int
2871 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
2872                    struct tc_cls_flower_offload *f,
2873                    u16 flow_flags,
2874                    struct net_device *filter_dev,
2875                    struct mlx5e_tc_flow **__flow)
2876 {
2877         struct netlink_ext_ack *extack = f->common.extack;
2878         struct mlx5e_tc_flow_parse_attr *parse_attr;
2879         struct mlx5e_tc_flow *flow;
2880         int attr_size, err;
2881
2882         /* multi-chain not supported for NIC rules */
2883         if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
2884                 return -EOPNOTSUPP;
2885
2886         flow_flags |= MLX5E_TC_FLOW_NIC;
2887         attr_size  = sizeof(struct mlx5_nic_flow_attr);
2888         err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
2889                                &parse_attr, &flow);
2890         if (err)
2891                 goto out;
2892
2893         parse_attr->filter_dev = filter_dev;
2894         err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
2895                                f, filter_dev);
2896         if (err)
2897                 goto err_free;
2898
2899         err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
2900         if (err)
2901                 goto err_free;
2902
2903         err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
2904         if (err)
2905                 goto err_free;
2906
2907         flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2908         kvfree(parse_attr);
2909         *__flow = flow;
2910
2911         return 0;
2912
2913 err_free:
2914         kfree(flow);
2915         kvfree(parse_attr);
2916 out:
2917         return err;
2918 }
2919
2920 static int
2921 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
2922                   struct tc_cls_flower_offload *f,
2923                   int flags,
2924                   struct net_device *filter_dev,
2925                   struct mlx5e_tc_flow **flow)
2926 {
2927         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2928         u16 flow_flags;
2929         int err;
2930
2931         get_flags(flags, &flow_flags);
2932
2933         if (!tc_can_offload_extack(priv->netdev, f->common.extack))
2934                 return -EOPNOTSUPP;
2935
2936         if (esw && esw->mode == SRIOV_OFFLOADS)
2937                 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
2938                                          filter_dev, flow);
2939         else
2940                 err = mlx5e_add_nic_flow(priv, f, flow_flags,
2941                                          filter_dev, flow);
2942
2943         return err;
2944 }
2945
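/* Flower replace entry point: a cookie that is already offloaded is
 * ignored (with a one-time warning); otherwise the flow is parsed,
 * offloaded and inserted into the hashtable.
 */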
2946 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
2947                            struct tc_cls_flower_offload *f, int flags)
2948 {
2949         struct netlink_ext_ack *extack = f->common.extack;
2950         struct rhashtable *tc_ht = get_tc_ht(priv);
2951         struct mlx5e_tc_flow *flow;
2952         int err = 0;
2953
2954         flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2955         if (flow) {
2956                 NL_SET_ERR_MSG_MOD(extack,
2957                                    "flow cookie already exists, ignoring");
2958                 netdev_warn_once(priv->netdev,
2959                                  "flow cookie %lx already exists, ignoring\n",
2960                                  f->cookie);
2961                 goto out;
2962         }
2963
2964         err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
2965         if (err)
2966                 goto out;
2967
2968         err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
2969         if (err)
2970                 goto err_free;
2971
2972         return 0;
2973
2974 err_free:
2975         mlx5e_tc_del_flow(priv, flow);
2976         kfree(flow);
2977 out:
2978         return err;
2979 }
2980
2981 #define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
2982 #define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
2983
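/* Comparing the two masks directly relies on the internal ingress
 * and egress flow flags occupying the same bit positions as the
 * exported TC direction flags.
 */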
2984 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
2985 {
2986         if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
2987                 return true;
2988
2989         return false;
2990 }
2991
2992 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
2993                         struct tc_cls_flower_offload *f, int flags)
2994 {
2995         struct rhashtable *tc_ht = get_tc_ht(priv);
2996         struct mlx5e_tc_flow *flow;
2997
2998         flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2999         if (!flow || !same_flow_direction(flow, flags))
3000                 return -EINVAL;
3001
3002         rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3003
3004         mlx5e_tc_del_flow(priv, flow);
3005
3006         kfree(flow);
3007
3008         return 0;
3009 }
3010
3011 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
3012                        struct tc_cls_flower_offload *f, int flags)
3013 {
3014         struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3015         struct rhashtable *tc_ht = get_tc_ht(priv);
3016         struct mlx5_eswitch *peer_esw;
3017         struct mlx5e_tc_flow *flow;
3018         struct mlx5_fc *counter;
3019         u64 bytes;
3020         u64 packets;
3021         u64 lastuse;
3022
3023         flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
3024         if (!flow || !same_flow_direction(flow, flags))
3025                 return -EINVAL;
3026
3027         if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
3028                 return 0;
3029
3030         counter = mlx5e_tc_get_counter(flow);
3031         if (!counter)
3032                 return 0;
3033
3034         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
3035
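        /* A duplicated flow has a second counter on the peer eswitch;
         * fold its bytes, packets and last-use time into the stats
         * reported for the original flow.
         */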
3036         peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3037         if (!peer_esw)
3038                 goto out;
3039
3040         if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
3041             (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
3042                 u64 bytes2;
3043                 u64 packets2;
3044                 u64 lastuse2;
3045
3046                 counter = mlx5e_tc_get_counter(flow->peer_flow);
3047                 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
3048
3049                 bytes += bytes2;
3050                 packets += packets2;
3051                 lastuse = max_t(u64, lastuse, lastuse2);
3052         }
3053
3054         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3055
3056 out:
3057         tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
3058
3059         return 0;
3060 }
3061
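/* A peer netdev on the same HW is going away: flag every hairpin
 * entry whose recorded peer vhca id matches it as peer_gone.
 */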
3062 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
3063                                               struct mlx5e_priv *peer_priv)
3064 {
3065         struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
3066         struct mlx5e_hairpin_entry *hpe;
3067         u16 peer_vhca_id;
3068         int bkt;
3069
3070         if (!same_hw_devs(priv, peer_priv))
3071                 return;
3072
3073         peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
3074
3075         hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
3076                 if (hpe->peer_vhca_id == peer_vhca_id)
3077                         hpe->hp->pair->peer_gone = true;
3078         }
3079 }
3080
3081 static int mlx5e_tc_netdev_event(struct notifier_block *this,
3082                                  unsigned long event, void *ptr)
3083 {
3084         struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3085         struct mlx5e_flow_steering *fs;
3086         struct mlx5e_priv *peer_priv;
3087         struct mlx5e_tc_table *tc;
3088         struct mlx5e_priv *priv;
3089
3090         if (ndev->netdev_ops != &mlx5e_netdev_ops ||
3091             event != NETDEV_UNREGISTER ||
3092             ndev->reg_state == NETREG_REGISTERED)
3093                 return NOTIFY_DONE;
3094
3095         tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
3096         fs = container_of(tc, struct mlx5e_flow_steering, tc);
3097         priv = container_of(fs, struct mlx5e_priv, fs);
3098         peer_priv = netdev_priv(ndev);
3099         if (priv == peer_priv ||
3100             !(priv->netdev->features & NETIF_F_HW_TC))
3101                 return NOTIFY_DONE;
3102
3103         mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
3104
3105         return NOTIFY_DONE;
3106 }
3107
3108 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
3109 {
3110         struct mlx5e_tc_table *tc = &priv->fs.tc;
3111         int err;
3112
3113         hash_init(tc->mod_hdr_tbl);
3114         hash_init(tc->hairpin_tbl);
3115
3116         err = rhashtable_init(&tc->ht, &tc_ht_params);
3117         if (err)
3118                 return err;
3119
3120         tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
3121         if (register_netdevice_notifier(&tc->netdevice_nb)) {
3122                 tc->netdevice_nb.notifier_call = NULL;
3123                 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
3124         }
3125
3126         return err;
3127 }
3128
3129 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
3130 {
3131         struct mlx5e_tc_flow *flow = ptr;
3132         struct mlx5e_priv *priv = flow->priv;
3133
3134         mlx5e_tc_del_flow(priv, flow);
3135         kfree(flow);
3136 }
3137
3138 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
3139 {
3140         struct mlx5e_tc_table *tc = &priv->fs.tc;
3141
3142         if (tc->netdevice_nb.notifier_call)
3143                 unregister_netdevice_notifier(&tc->netdevice_nb);
3144
3145         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
3146
3147         if (!IS_ERR_OR_NULL(tc->t)) {
3148                 mlx5_destroy_flow_table(tc->t);
3149                 tc->t = NULL;
3150         }
3151 }
3152
3153 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
3154 {
3155         return rhashtable_init(tc_ht, &tc_ht_params);
3156 }
3157
3158 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
3159 {
3160         rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
3161 }
3162
3163 int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
3164 {
3165         struct rhashtable *tc_ht = get_tc_ht(priv);
3166
3167         return atomic_read(&tc_ht->nelems);
3168 }
3169
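/* Tear down the peer duplicates of every flow tracked on this
 * eswitch, typically when the paired eswitches are unbound.
 */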
3170 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
3171 {
3172         struct mlx5e_tc_flow *flow, *tmp;
3173
3174         list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
3175                 __mlx5e_tc_del_fdb_peer_flow(flow);
3176 }