/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

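/* Read the HW counters of the e-switch vport backing this representor.
 * rx/tx are flipped on purpose: what the VF transmits is what the
 * switch-side vport (and hence this representor) receives, and vice
 * versa.
 */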
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

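/* Sum the per-ring SW counters of all channels (RQ plus one SQ per TC)
 * into the representor's aggregate software stats.
 */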
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = c->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = c->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
		}
	}
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

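/* For each given SQ, add an e-switch rule that re-injects traffic sent
 * on the representor back to the vport it represents. Rules are kept
 * on vport_sqs_list so mlx5e_sqs2vport_stop() can remove them.
 */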
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

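/* Gather the SQ numbers of all open channels/TCs and install the
 * corresponding e-switch send-to-vport rules.
 */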
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

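/* Initialize the neigh stats sampling interval to the smallest
 * DELAY_PROBE_TIME among the ARP (and, if enabled, IPv6 ND) tables and
 * propagate it to the flow counters sampling interval.
 */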
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
	    !ether_addr_equal(e->h_dest, ha))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}

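/* Work handler queued from the netevent notifier: re-reads the
 * neighbour state under its lock, updates the offloaded encap flows
 * accordingly, then drops the nhe and neighbour references taken when
 * the work was queued.
 */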
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

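/* Netevent notifier. Runs in atomic context, so the neigh hash table
 * and list are protected with encap_lock (bh variant) instead of RTNL;
 * actual flow updates are deferred to mlx5e_rep_neigh_update() on the
 * driver workqueue. DELAY_PROBE_TIME changes only tighten the stats
 * sampling interval.
 */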
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table; we only care about per-device
		 * changes to the delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err)
			return err;
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					   rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

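/* The phys port name of a VF representor is its VF number, i.e. the
 * e-switch vport number minus one.
 */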
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	ret = snprintf(buf, len, "%d", rep->vport - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct tc_cls_flower_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower, flags);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower, flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
				       void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
	default:
		return -EOPNOTSUPP;
	}
}

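/* Bind or unbind the representor's clsact ingress block to the flower
 * offload callback; other binder types are rejected.
 */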
static int mlx5e_rep_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep && rep->vport != FDB_UPLINK_VPORT)
		return true;

	return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
			return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	mlx5e_rep_update_sw_counters(priv);

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};

static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open = mlx5e_rep_open,
	.ndo_stop = mlx5e_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_stats64 = mlx5e_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_get_offload_stats,
	.ndo_change_mtu = mlx5e_change_rep_mtu,
};

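/* Representor netdevs use a single TC and small fixed-size rings (see
 * MLX5E_REP_PARAMS_LOG_{SQ,RQ}_SIZE); they are intended to carry
 * slow-path (e-switch miss) traffic only.
 */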
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params, u16 mtu)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = mtu;
	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc = 1;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

	netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->profile = profile;
	priv->ppriv = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);

	mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rpriv->vport_rx_rule = flow_rule;

	return 0;

err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
	return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_nic_tx,
	.update_stats = mlx5e_rep_update_hw_counters,
	.max_nch = mlx5e_get_rep_max_num_channels,
	.update_carrier = NULL,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc = 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	int err;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			return err;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err)
		goto err_remove_sqs;

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(&rpriv->tc_ht);
	if (err)
		goto err_neigh_cleanup;

	return 0;

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
	mlx5e_remove_sqs_fwd_rules(priv);
	return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean uplink offloaded TC rules, delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->tc_ht);

	mlx5e_rep_neigh_cleanup(rpriv);
}

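/* Create, attach and register a representor netdev for a VF vport,
 * including its neighbour update machinery and the egress TC callback
 * registered against the uplink representor's priv.
 */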
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	struct mlx5e_priv *upriv;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rpriv->rep = rep;
	rep->rep_if[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_netdev;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
	upriv = netdev_priv(uplink_rpriv->netdev);
	err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
					 upriv);
	if (err)
		goto err_neigh_cleanup;

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_egdev_cleanup;
	}

	return 0;

err_egdev_cleanup:
	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
				     upriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *uplink_rpriv;
	void *ppriv = priv->ppriv;
	struct mlx5e_priv *upriv;

	unregister_netdev(netdev);
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
						    REP_ETH);
	upriv = netdev_priv(uplink_rpriv->netdev);
	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
				     upriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep_if rep_if = {};

		rep_if.load = mlx5e_vport_rep_load;
		rep_if.unload = mlx5e_vport_rep_unload;
		rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
	}
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}

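/* Register the uplink (PF, vport 0) representor interface with the
 * e-switch, then one representor per VF vport.
 */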
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if;
	struct mlx5e_rep_priv *rpriv;

	rpriv = priv->ppriv;
	rpriv->netdev = priv->netdev;

	rep_if.load = mlx5e_nic_rep_load;
	rep_if.unload = mlx5e_nic_rep_unload;
	rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
	rep_if.priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */

	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return NULL;

	rpriv->rep = &esw->offloads.vport_reps[0];
	return rpriv;
}