drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"

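/* Default parameters for a representor netdev: a small SQ and a single
 * channel, as representors mainly carry e-switch slow-path (miss) traffic.
 */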
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

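/* Per-netdev state for an indirect TC block bound on a tunnel or vlan
 * device; entries are kept on the uplink's tc_indr_block_priv_list.
 */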
struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;

        struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%04d (%.16s)",
                 fw_rev_maj(mdev), fw_rev_min(mdev),
                 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
                                         struct ethtool_drvinfo *drvinfo)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_rep_get_drvinfo(dev, drvinfo);
        strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
                sizeof(drvinfo->bus_info));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
        return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       sw_rep_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                                   sw_rep_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct rtnl_link_stats64 stats64 = {};

        memset(s, 0, sizeof(*s));
        mlx5e_fold_sw_stats64(priv, &stats64);

        s->rx_packets = stats64.rx_packets;
        s->rx_bytes   = stats64.rx_bytes;
        s->tx_packets = stats64.tx_packets;
        s->tx_bytes   = stats64.tx_bytes;
        s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
        return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                                   vport_rep_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
                            rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes   = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (stringset) {
        case ETH_SS_STATS:
                mlx5e_stats_fill_strings(priv, data);
                break;
        }
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                return mlx5e_stats_total_num(priv);
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

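/* Replace the representor's vport rx rule with one steering to @dest.
 * The new rule is created before the old one is deleted, so the vport
 * is never left without an rx rule.
 */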
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
                                           struct mlx5_flow_destination *dest)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

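/* Changing the channel count may require re-pointing the vport rx rule:
 * multiple channels steer to the TTC flow table (RSS), while a single
 * channel steers directly to the only TIR.
 */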
static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 curr_channels_amount = priv->channels.params.num_channels;
        u32 new_channels_amount = ch->combined_count;
        struct mlx5_flow_destination new_dest;
        int err = 0;

        err = mlx5e_ethtool_set_channels(priv, ch);
        if (err)
                return err;

        if (curr_channels_amount == 1 && new_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                new_dest.ft = priv->fs.ttc.ft.t;
        } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                new_dest.tir_num = priv->direct_tir[0].tirn;
        } else {
                return 0;
        }

        err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
        if (err) {
                netdev_warn(priv->netdev, "Failed to update vport rx rule when going from %d to %d channels\n",
                            curr_channels_amount, new_channels_amount);
                return err;
        }

        return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
                                            struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
                                           struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
                                               struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
                                               const struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam     = mlx5e_rep_get_ringparam,
        .set_ringparam     = mlx5e_rep_set_ringparam,
        .get_channels      = mlx5e_rep_get_channels,
        .set_channels      = mlx5e_rep_set_channels,
        .get_coalesce      = mlx5e_rep_get_coalesce,
        .set_coalesce      = mlx5e_rep_set_coalesce,
        .get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_uplink_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam     = mlx5e_rep_get_ringparam,
        .set_ringparam     = mlx5e_rep_set_ringparam,
        .get_channels      = mlx5e_rep_get_channels,
        .set_channels      = mlx5e_rep_set_channels,
        .get_coalesce      = mlx5e_rep_get_coalesce,
        .set_coalesce      = mlx5e_rep_set_coalesce,
        .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
        .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
        .get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
        .get_pauseparam    = mlx5e_uplink_rep_get_pauseparam,
        .set_pauseparam    = mlx5e_uplink_rep_set_pauseparam,
};

static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
                                         struct netdev_phys_item_id *ppid)
{
        struct mlx5e_priv *priv;
        u64 parent_id;

        priv = netdev_priv(dev);

        parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
        ppid->id_len = sizeof(parent_id);
        memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

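/* sqs2vport rules re-inject traffic sent on the representor's send queues
 * back to the represented e-switch vport. They exist only in switchdev
 * (offloads) mode and are tracked on the rep's vport_sqs_list.
 */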
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

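/* The neigh stats sampling interval is bounded by the smallest
 * DELAY_PROBE_TIME of the IPv4 and IPv6 neighbour tables.
 */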
static unsigned long mlx5e_rep_ipv6_interval(void)
{
        if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
                return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);

        return ~0UL;
}

static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
        unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        return refcount_inc_not_zero(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt)) {
                mlx5e_rep_neigh_entry_remove(nhe);
                kfree_rcu(nhe, rcu);
        }
}

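/* Walk the neigh list under RCU and return the first entry after @nhe
 * (or the list head when @nhe is NULL) whose refcount could still be
 * taken; the reference on @nhe itself is released.
 */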
static struct mlx5e_neigh_hash_entry *
mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
                   struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_neigh_hash_entry *next = NULL;

        rcu_read_lock();

        for (next = nhe ?
                     list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
                                           &nhe->neigh_list,
                                           struct mlx5e_neigh_hash_entry,
                                           neigh_list) :
                     list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
                                            struct mlx5e_neigh_hash_entry,
                                            neigh_list);
             next;
             next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
                                          &next->neigh_list,
                                          struct mlx5e_neigh_hash_entry,
                                          neigh_list))
                if (mlx5e_rep_neigh_entry_hold(next))
                        break;

        rcu_read_unlock();

        if (nhe)
                mlx5e_rep_neigh_entry_release(nhe);

        return next;
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

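/* Sync the offloaded flows of encap entry @e with the new neighbour
 * state: un-offload valid encaps whose neighbour disconnected or whose
 * destination mac changed, and re-offload the flows with the updated
 * mac once the neighbour is connected.
 */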
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        bool encap_connected;
        LIST_HEAD(flow_list);

        ASSERT_RTNL();

        /* wait for encap to be fully initialized */
        wait_for_completion(&e->res_ready);

        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
        if (e->compl_result < 0 || (encap_connected == neigh_connected &&
                                    ether_addr_equal(e->h_dest, ha)))
                goto unlock;

        mlx5e_take_all_encap_flows(e, &flow_list);

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e, &flow_list);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
                /* Update the encap source mac, since the flows may have
                 * been deleted because the encap source mac changed.
                 */
                ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e, &flow_list);
        }
unlock:
        mutex_unlock(&esw->offloads.encap_tbl_lock);
        mlx5e_put_encap_flow_list(priv, &flow_list);
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                if (!mlx5e_encap_take(e))
                        continue;

                priv = netdev_priv(e->out_dev);
                mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
                mlx5e_encap_put(priv, e);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

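/* Indirect TC block offload: flower rules installed on tunnel devices
 * (or vlan devices on top of the uplink) are offloaded to the e-switch
 * through the uplink representor via the callbacks below.
 */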
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
        struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

        list_for_each_entry_safe(cb_priv, temp, head, list) {
                mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
                kfree(cb_priv);
        }
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct flow_cls_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv)
{
        unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int err = 0;

        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
                                         void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
{
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

        list_del(&indr_priv->list);
        kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                              struct mlx5e_rep_priv *rpriv,
                              struct flow_block_offload *f)
{
        struct mlx5e_rep_indr_block_priv *indr_priv;
        struct flow_block_cb *block_cb;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        f->unlocked_driver_cb = true;
        f->driver_block_list = &mlx5e_block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
                                               indr_priv, indr_priv,
                                               mlx5e_rep_indr_tc_block_unbind);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                        return PTR_ERR(block_cb);
                }
                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

                return 0;
        case FLOW_BLOCK_UNBIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (!indr_priv)
                        return -ENOENT;

                block_cb = flow_block_cb_lookup(f->block,
                                                mlx5e_rep_indr_setup_block_cb,
                                                indr_priv);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                               enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
                                                     type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
                                         struct net_device *netdev)
{
        int err;

        err = __flow_indr_block_cb_register(netdev, rpriv,
                                            mlx5e_rep_indr_setup_tc_cb,
                                            rpriv);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

                mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
                              netdev_name(netdev), err);
        }
        return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev)
{
        __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
                                        rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    uplink_priv.netdevice_nb);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
            !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_REGISTER:
                mlx5e_rep_indr_register_block(rpriv, netdev);
                break;
        case NETDEV_UNREGISTER:
                mlx5e_rep_indr_unregister_block(rpriv, netdev);
                break;
        }
        return NOTIFY_OK;
}

static void
mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
                                  struct mlx5e_neigh_hash_entry *nhe,
                                  struct neighbour *n)
{
        /* Take a reference to ensure the neighbour and mlx5 encap
         * entry won't be destructed until we drop the reference in
         * delayed work.
         */
        neigh_hold(n);

        /* This assignment is valid as long as the neigh reference
         * is taken
         */
        nhe->n = n;

        if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                mlx5e_rep_neigh_entry_release(nhe);
                neigh_release(n);
        }
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

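/* Netevent handler: on NETEVENT_NEIGH_UPDATE, queue update work for the
 * matching neigh hash entry; on NETEVENT_DELAY_PROBE_TIME_UPDATE, tighten
 * the flow counter sampling interval accordingly.
 */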
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                rcu_read_lock();
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                rcu_read_unlock();
                if (!nhe)
                        return NOTIFY_DONE;

                mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check that the device is present since we don't care
                 * about changes in the default table; we only care about
                 * per-device changes to the delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                rcu_read_lock();
                list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
                                        neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                rcu_read_unlock();
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        mutex_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        mutex_destroy(&neigh_update->encap_lock);
        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;

        mutex_lock(&rpriv->neigh_update.encap_lock);

        list_del_rcu(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        mutex_unlock(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under the representor's encap_lock or
 * inside rcu read lock section.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_neigh_hash_entry *nhe;

        nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                     mlx5e_neigh_ht_params);
        return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        (*nhe)->priv = priv;
        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        spin_lock_init(&(*nhe)->encap_list_lock);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

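/* Attach encap entry @e to the neigh hash entry matching its tunnel
 * neighbour, creating that entry if needed, and take a tunnel entropy
 * reference for the encap's reformat type.
 */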
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
        if (err)
                return err;

        mutex_lock(&rpriv->neigh_update.encap_lock);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err) {
                        mutex_unlock(&rpriv->neigh_update.encap_lock);
                        mlx5_tun_entropy_refcount_dec(tun_entropy,
                                                      e->reformat_type);
                        return err;
                }
        }

        e->nhe = nhe;
        spin_lock(&nhe->encap_list_lock);
        list_add_rcu(&e->encap_list, &nhe->encap_list);
        spin_unlock(&nhe->encap_list_lock);

        mutex_unlock(&rpriv->neigh_update.encap_lock);

        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

        if (!e->nhe)
                return;

        spin_lock(&e->nhe->encap_list_lock);
        list_del_rcu(&e->encap_list);
        spin_unlock(&e->nhe->encap_list_lock);

        mlx5e_rep_neigh_entry_release(e->nhe);
        e->nhe = NULL;
        mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

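/* ndo_open for vport representors: besides opening the netdev, bring the
 * e-switch vport admin state up and reflect success in the carrier state.
 */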
static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, 1,
                                           MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, 1,
                                      MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct flow_cls_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case FLOW_CLS_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case FLOW_CLS_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case FLOW_CLS_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
                                    struct tc_cls_matchall_offload *ma)
{
        switch (ma->command) {
        case TC_CLSMATCHALL_REPLACE:
                return mlx5e_tc_configure_matchall(priv, ma);
        case TC_CLSMATCHALL_DESTROY:
                return mlx5e_tc_delete_matchall(priv, ma);
        case TC_CLSMATCHALL_STATS:
                mlx5e_tc_stats_matchall(priv, ma);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
        case TC_SETUP_CLSMATCHALL:
                return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct flow_cls_offload tmp, *f = type_data;
        struct mlx5e_priv *priv = cb_priv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
        int err;

        flags = MLX5_TC_FLAG(INGRESS) |
                MLX5_TC_FLAG(ESW_OFFLOAD) |
                MLX5_TC_FLAG(FT_OFFLOAD);
        esw = priv->mdev->priv.eswitch;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                memcpy(&tmp, f, sizeof(*f));

                if (!mlx5_esw_chains_prios_supported(esw) ||
                    tmp.common.chain_index)
                        return -EOPNOTSUPP;

                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
                 *
                 * FT offload can use prio range [0, INT_MAX], so we normalize
                 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
                 * as with tc, where prio 0 isn't supported.
                 *
                 * We only support chain 0 of FT offload.
                 */
                if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
                        return -EOPNOTSUPP;

                tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
                tmp.common.prio++;
                err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
                memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct flow_block_offload *f = type_data;

        f->unlocked_driver_cb = true;

        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_tc_cb_list,
                                                  mlx5e_rep_setup_tc_cb,
                                                  priv, priv, true);
        case TC_SETUP_FT:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_ft_cb_list,
                                                  mlx5e_rep_setup_ft_cb,
                                                  priv, priv, true);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        if (!rpriv) /* non vport rep mlx5e instances don't use this field */
                return false;

        rep = rpriv->rep;
        return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

1362 static int
1363 mlx5e_get_sw_stats64(const struct net_device *dev,
1364                      struct rtnl_link_stats64 *stats)
1365 {
1366         struct mlx5e_priv *priv = netdev_priv(dev);
1367
1368         mlx5e_fold_sw_stats64(priv, stats);
1369         return 0;
1370 }
1371
1372 static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
1373                                        void *sp)
1374 {
1375         switch (attr_id) {
1376         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1377                 return mlx5e_get_sw_stats64(dev, sp);
1378         }
1379
1380         return -EINVAL;
1381 }
1382
1383 static void
1384 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
1385 {
1386         struct mlx5e_priv *priv = netdev_priv(dev);
1387
1388         /* update HW stats in background for next time */
1389         mlx5e_queue_update_stats(priv);
1390         memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
1391 }
1392
1393 static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
1394 {
1395         return mlx5e_change_mtu(netdev, new_mtu, NULL);
1396 }
1397
1398 static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
1399 {
1400         return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
1401 }
1402
1403 static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
1404 {
1405         struct sockaddr *saddr = addr;
1406
1407         if (!is_valid_ether_addr(saddr->sa_data))
1408                 return -EADDRNOTAVAIL;
1409
1410         ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1411         return 0;
1412 }
1413
1414 static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1415                                         __be16 vlan_proto)
1416 {
1417         netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1418
1419         if (vlan != 0)
1420                 return -EOPNOTSUPP;
1421
1422         /* allow setting 0-vid for compatibility with libvirt */
1423         return 0;
1424 }
1425
1426 static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
1427 {
1428         struct mlx5e_priv *priv = netdev_priv(dev);
1429         struct mlx5e_rep_priv *rpriv = priv->ppriv;
1430
1431         return &rpriv->dl_port;
1432 }
1433
static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open                = mlx5e_rep_open,
        .ndo_stop                = mlx5e_rep_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
        .ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu          = mlx5e_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
        .ndo_get_stats64         = mlx5e_get_stats,
        .ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
        .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
        .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
        .ndo_features_check      = mlx5e_features_check,
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_rate         = mlx5e_set_vf_rate,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
        .ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
        .ndo_set_features        = mlx5e_set_features,
};

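/* Representor netdevs are recognized purely by their netdev_ops
 * pointer; no private flag is needed.
 */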
bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
{
        return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
}

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
        if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
            netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
                return true;

        return false;
}

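/* Channel parameters: the uplink rep keeps the default SQ size, while
 * VF/PF reps get a smaller one, presumably since reps mostly carry
 * slow-path (miss) traffic.
 */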
static void mlx5e_build_rep_params(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params *params;

        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params = &priv->channels.params;
        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
        params->sw_mtu      = netdev->mtu;

        /* SQ */
        if (rep->vport == MLX5_VPORT_UPLINK)
                params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        else
                params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc                = 1;
        params->tunneled_offload_en = false;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

        /* RSS */
        mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

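/* The uplink rep inherits the physical port MAC plus the full
 * ethtool/DCB ops; VF/PF reps get a random MAC and reduced ops.
 */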
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;

        if (rep->vport == MLX5_VPORT_UPLINK) {
                SET_NETDEV_DEV(netdev, mdev->device);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_mac_address(mdev, netdev->dev_addr);
                netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
                if (MLX5_CAP_GEN(mdev, qos))
                        netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
        } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_rep;
                eth_hw_addr_random(netdev);
                netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
        }

        netdev->watchdog_timeo    = 15 * HZ;

        netdev->features       |= NETIF_F_NETNS_LOCAL;

        netdev->hw_features    |= NETIF_F_HW_TC;
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
        netdev->hw_features    |= NETIF_F_GRO;
        netdev->hw_features    |= NETIF_F_TSO;
        netdev->hw_features    |= NETIF_F_TSO6;
        netdev->hw_features    |= NETIF_F_RXCSUM;

        if (rep->vport == MLX5_VPORT_UPLINK)
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        else
                netdev->features |= NETIF_F_VLAN_CHALLENGED;

        netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

        mlx5e_build_rep_params(netdev);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);

        return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

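/* The TTC (traffic type classifier) table steers RX traffic into the
 * indirect TIRs by traffic type, with unclassified traffic going to the
 * first direct TIR (any_tt_tirn).
 */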
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
                return err;
        }
        return 0;
}

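/* Install the e-switch rule that steers traffic coming from this rep's
 * vport into the rep netdev's RX path (first direct TIR).
 */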
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[0].tirn;
        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

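/* RX init: drop RQ, RQTs, TIRs, the TTC table and the vport RX rule are
 * created in order; the error labels below unwind in reverse.
 */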
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_direct_tirs;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_ttc_table;

        return 0;

err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
        mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
        int err = mlx5e_init_rep_rx(priv);

        if (err)
                return err;

        mlx5e_create_q_counters(priv);
        return 0;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
        mlx5e_destroy_q_counters(priv);
        mlx5e_cleanup_rep_rx(priv);
}

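/* Uplink-only TX state: the shared TC flow table, the tunnel entropy
 * setting and the per-netns netdevice notifier used for indirect TC
 * block (e.g. tunnel device) offloads.
 */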
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int err;

        netdev = rpriv->netdev;
        priv = netdev_priv(netdev);
        uplink_priv = &rpriv->uplink_priv;

        mutex_init(&uplink_priv->unready_flows_lock);
        INIT_LIST_HEAD(&uplink_priv->unready_flows);

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
        if (err)
                return err;

        mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

        /* init indirect block notifications */
        INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
        uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
        err = register_netdevice_notifier_dev_net(rpriv->netdev,
                                                  &uplink_priv->netdevice_nb,
                                                  &uplink_priv->netdevice_nn);
        if (err) {
                mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
                goto tc_esw_cleanup;
        }

        return 0;

tc_esw_cleanup:
        mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
        return err;
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_init_uplink_rep_tx(rpriv);
                if (err)
                        goto destroy_tises;
        }

        return 0;

destroy_tises:
        mlx5e_destroy_tises(priv);
        return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

        /* clean indirect TC block notifications */
        unregister_netdevice_notifier_dev_net(rpriv->netdev,
                                              &uplink_priv->netdevice_nb,
                                              &uplink_priv->netdevice_nn);
        mlx5e_rep_indr_clean_block_privs(rpriv);

        /* delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
        mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5e_destroy_tises(priv);

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_cleanup_uplink_rep_tx(rpriv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
        mlx5e_set_netdev_mtu_boundaries(priv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
        return 0;
}

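/* Uplink async events: port up/down schedules a carrier update, while a
 * port-affinity change (multi-port/LAG) schedules a re-offload of the
 * TC flows.
 */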
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
        struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

        if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
                struct mlx5_eqe *eqe = data;

                switch (eqe->sub_type) {
                case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        queue_work(priv->wq, &priv->update_carrier_work);
                        break;
                default:
                        return NOTIFY_DONE;
                }

                return NOTIFY_OK;
        }

        if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
                struct mlx5e_rep_priv *rpriv = priv->ppriv;

                queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

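/* Uplink rep enable: derive the MTU bounds from the port caps, join the
 * LAG, and hook up device events and (optionally) DCB.
 */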
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
                  mlx5e_tc_reoffload_flows_work);

        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
#endif
        mlx5_notifier_unregister(mdev, &priv->events_nb);
        cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
        mlx5_lag_remove(mdev);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups are listed in reverse order of their update_stats() calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(sw_rep),
        &MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups are listed in reverse order of their update_stats() calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(eth_ext),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .cleanup                = mlx5e_cleanup_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_rep_tx,
        .enable                 = mlx5e_rep_enable,
        .update_rx              = mlx5e_update_rep_rx,
        .update_stats           = mlx5e_update_ndo_stats,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = 1,
        .rq_groups              = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps             = mlx5e_rep_stats_grps,
        .stats_grps_num         = mlx5e_rep_stats_grps_num,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .init                   = mlx5e_init_rep,
        .cleanup                = mlx5e_cleanup_rep,
        .init_rx                = mlx5e_init_ul_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_ul_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_rep_tx,
        .enable                 = mlx5e_uplink_rep_enable,
        .disable                = mlx5e_uplink_rep_disable,
        .update_rx              = mlx5e_update_rep_rx,
        .update_stats           = mlx5e_update_ndo_stats,
        .update_carrier         = mlx5e_update_carrier,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = MLX5E_MAX_NUM_TC,
        .rq_groups              = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps             = mlx5e_ul_rep_stats_grps,
        .stats_grps_num         = mlx5e_ul_rep_stats_grps_num,
};

static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
                          const struct mlx5e_rep_priv *rpriv)
{
        return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
               rpriv->rep->vport == MLX5_VPORT_PF ||
               mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}

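/* Encode a devlink port index: vhca_id in the high 16 bits, vport
 * number in the low 16 bits.
 */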
static unsigned int
vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
{
        return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static int register_devlink_port(struct mlx5_core_dev *dev,
                                 struct mlx5e_rep_priv *rpriv)
{
        struct devlink *devlink = priv_to_devlink(dev);
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct netdev_phys_item_id ppid = {};
        unsigned int dl_port_index = 0;

        if (!is_devlink_port_supported(dev, rpriv))
                return 0;

        mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);

        if (rep->vport == MLX5_VPORT_UPLINK) {
                devlink_port_attrs_set(&rpriv->dl_port,
                                       DEVLINK_PORT_FLAVOUR_PHYSICAL,
                                       PCI_FUNC(dev->pdev->devfn), false, 0,
                                       &ppid.id[0], ppid.id_len);
                dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
        } else if (rep->vport == MLX5_VPORT_PF) {
                devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
                                              dev->pdev->devfn);
                dl_port_index = rep->vport;
        } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
                                            rpriv->rep->vport)) {
                devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
                                              dev->pdev->devfn,
                                              rep->vport - 1);
                dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
        }

        return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}

static void unregister_devlink_port(struct mlx5_core_dev *dev,
                                    struct mlx5e_rep_priv *rpriv)
{
        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_unregister(&rpriv->dl_port);
}

/* e-Switch vport representors */
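/* Load sequence: allocate rpriv, create and attach the rep netdev (the
 * uplink rep also creates the mdev resources), set up neigh handling,
 * then register the devlink port and the netdev itself.
 */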
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        const struct mlx5e_profile *profile;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        int nch, err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        /* rpriv->rep to be looked up when profile->init() is called */
        rpriv->rep = rep;

        nch = mlx5e_get_max_num_channels(dev);
        profile = (rep->vport == MLX5_VPORT_UPLINK) ?
                  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
        netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
        if (!netdev) {
                mlx5_core_warn(dev,
                               "Failed to create representor netdev for vport %d\n",
                               rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        dev_net_set(netdev, mlx5_core_net(dev));
        rpriv->netdev = netdev;
        rep->rep_data[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        if (rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_create_mdev_resources(dev);
                if (err)
                        goto err_destroy_netdev;
        }

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                netdev_warn(netdev,
                            "Failed to attach representor netdev for vport %d\n",
                            rep->vport);
                goto err_destroy_mdev_resources;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                netdev_warn(netdev,
                            "Failed to initialize neighbours handling for vport %d\n",
                            rep->vport);
                goto err_detach_netdev;
        }

        err = register_devlink_port(dev, rpriv);
        if (err) {
                netdev_warn(netdev, "Failed to register devlink port %d\n",
                            rep->vport);
                goto err_neigh_cleanup;
        }

        err = register_netdev(netdev);
        if (err) {
                netdev_warn(netdev,
                            "Failed to register representor netdev for vport %d\n",
                            rep->vport);
                goto err_devlink_cleanup;
        }

        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_type_eth_set(&rpriv->dl_port, netdev);
        return 0;

err_devlink_cleanup:
        unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *dev = priv->mdev;
        void *ppriv = priv->ppriv;

        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_type_clear(&rpriv->dl_port);
        unregister_netdev(netdev);
        unregister_devlink_port(dev, rpriv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(priv->mdev);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

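/* Callbacks invoked by the e-switch core per vport when REP_ETH
 * representors are loaded/unloaded (e.g. on switchdev mode changes).
 */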
static const struct mlx5_eswitch_rep_ops rep_ops = {
        .load = mlx5e_vport_rep_load,
        .unload = mlx5e_vport_rep_unload,
        .get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}