Commit | Line | Data |
---|---|---|
cb67b832 HHZ |
1 | /* |
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <generated/utsrelease.h> | |
34 | #include <linux/mlx5/fs.h> | |
35 | #include <net/switchdev.h> | |
d957b4e3 | 36 | #include <net/pkt_cls.h> |
717503b9 | 37 | #include <net/act_api.h> |
232c0013 HHZ |
38 | #include <net/netevent.h> |
39 | #include <net/arp.h> | |
f60f315d | 40 | #include <net/devlink.h> |
cb67b832 HHZ |
41 | |
42 | #include "eswitch.h" | |
43 | #include "en.h" | |
1d447a39 | 44 | #include "en_rep.h" |
adb4c123 | 45 | #include "en_tc.h" |
101f4de9 | 46 | #include "en/tc_tun.h" |
f6dfb4c3 | 47 | #include "fs_core.h" |
97417f61 | 48 | #include "lib/port_tun.h" |
cb67b832 | 49 | |
4c8fb298 | 50 | #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ |
e7164313 | 51 | max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) |
8956f001 | 52 | #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 |
4246f698 | 53 | |
cb67b832 HHZ |
54 | static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; |
55 | ||
f5bc2c5d OS |
/* Per-netdev context for an indirectly-offloaded TC block (e.g. a tunnel
 * device whose rules are offloaded through the uplink representor).
 * Linked on rpriv->uplink_priv.tc_indr_block_priv_list.
 */
struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;	/* device owning the offloaded block */
	struct mlx5e_rep_priv *rpriv;	/* uplink representor doing the offload */

	struct list_head list;		/* entry in tc_indr_block_priv_list */
};
62 | ||
25f2d0e7 EB |
63 | static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv, |
64 | struct net_device *netdev); | |
f5bc2c5d | 65 | |
cb67b832 HHZ |
/* ethtool .get_drvinfo for VF representors: report the rep driver name,
 * kernel release as driver version, and the FW revision/board id read
 * from the parent mlx5 core device.
 */
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}
80 | ||
81 | static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev, | |
82 | struct ethtool_drvinfo *drvinfo) | |
83 | { | |
84 | struct mlx5e_priv *priv = netdev_priv(dev); | |
85 | ||
86 | mlx5e_rep_get_drvinfo(dev, drvinfo); | |
87 | strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev), | |
88 | sizeof(drvinfo->bus_info)); | |
cb67b832 HHZ |
89 | } |
90 | ||
/* Software (per-ring, CPU-side) counters exported to ethtool -S for
 * every representor. Offsets resolve into struct mlx5e_sw_stats.
 */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
97 | ||
a228060a OG |
/* Layout used only to name the hardware vport counters in ethtool output;
 * the values themselves are read from priv->stats.vf_vport
 * (struct rtnl_link_stats64) via matching field offsets.
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

/* Hardware (e-switch vport) counters exported to ethtool -S. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
cb67b832 HHZ |
114 | |
/* ethtool .get_strings: emit SW counter names first, then HW vport
 * counter names, in the same order mlx5e_rep_get_ethtool_stats() fills
 * the values (i keeps running across both loops).
 */
static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}
131 | ||
9b81d5a9 | 132 | static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) |
370bad0f OG |
133 | { |
134 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | |
1d447a39 SM |
135 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
136 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
370bad0f OG |
137 | struct rtnl_link_stats64 *vport_stats; |
138 | struct ifla_vf_stats vf_stats; | |
139 | int err; | |
140 | ||
141 | err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats); | |
142 | if (err) { | |
143 | pr_warn("vport %d error %d reading stats\n", rep->vport, err); | |
144 | return; | |
145 | } | |
146 | ||
147 | vport_stats = &priv->stats.vf_vport; | |
148 | /* flip tx/rx as we are reporting the counters for the switch vport */ | |
149 | vport_stats->rx_packets = vf_stats.tx_packets; | |
150 | vport_stats->rx_bytes = vf_stats.tx_bytes; | |
151 | vport_stats->tx_packets = vf_stats.rx_packets; | |
152 | vport_stats->tx_bytes = vf_stats.rx_bytes; | |
153 | } | |
154 | ||
d9ee0491 OG |
/* Uplink representor variant: derive the vf_vport counters from the
 * IEEE 802.3 physical-port counters (no e-switch vport stats for the
 * uplink). No rx/tx flip here — the physical port's view already matches
 * the uplink netdev's direction.
 */
static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}
169 | ||
/* Rebuild the representor's software stats by folding all per-ring
 * counters into a temporary rtnl_link_stats64 and copying out only the
 * few fields the rep exposes.
 */
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}
184 | ||
cb67b832 HHZ |
/* ethtool .get_ethtool_stats: refresh counters under the state lock,
 * then copy SW counters followed by HW vport counters — same order and
 * same running index scheme as mlx5e_rep_get_strings().
 */
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	mlx5e_rep_update_sw_counters(priv);
	/* profile hook updates the HW counters (rep or uplink variant) */
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}
207 | ||
208 | static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) | |
209 | { | |
210 | switch (sset) { | |
211 | case ETH_SS_STATS: | |
a228060a | 212 | return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS; |
cb67b832 HHZ |
213 | default: |
214 | return -EOPNOTSUPP; | |
215 | } | |
216 | } | |
217 | ||
f128f138 GT |
/* ethtool .get_ringparam: thin wrapper over the shared mlx5e helper. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
225 | ||
/* ethtool .set_ringparam: thin wrapper over the shared mlx5e helper. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
233 | ||
84a09733 GT |
/* Atomically-in-effect replace the representor's vport RX steering rule
 * with one pointing at @dest: create the new rule first, and only delete
 * the old one once creation succeeded, so RX steering never goes missing.
 * Returns 0 on success or a negative errno from rule creation.
 */
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
252 | ||
/* ethtool .get_channels: thin wrapper over the shared mlx5e helper. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
260 | ||
261 | static int mlx5e_rep_set_channels(struct net_device *dev, | |
262 | struct ethtool_channels *ch) | |
263 | { | |
264 | struct mlx5e_priv *priv = netdev_priv(dev); | |
265 | u16 curr_channels_amount = priv->channels.params.num_channels; | |
266 | u32 new_channels_amount = ch->combined_count; | |
267 | struct mlx5_flow_destination new_dest; | |
268 | int err = 0; | |
269 | ||
270 | err = mlx5e_ethtool_set_channels(priv, ch); | |
271 | if (err) | |
272 | return err; | |
273 | ||
274 | if (curr_channels_amount == 1 && new_channels_amount > 1) { | |
275 | new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; | |
276 | new_dest.ft = priv->fs.ttc.ft.t; | |
277 | } else if (new_channels_amount == 1 && curr_channels_amount > 1) { | |
278 | new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; | |
279 | new_dest.tir_num = priv->direct_tir[0].tirn; | |
280 | } else { | |
281 | return 0; | |
282 | } | |
283 | ||
284 | err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest); | |
285 | if (err) { | |
286 | netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n", | |
287 | curr_channels_amount, new_channels_amount); | |
288 | return err; | |
289 | } | |
290 | ||
291 | return 0; | |
292 | } | |
293 | ||
ff9b85de OG |
/* ethtool .get_coalesce: thin wrapper over the shared mlx5e helper. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal);
}
301 | ||
/* ethtool .set_coalesce: thin wrapper over the shared mlx5e helper. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal);
}
309 | ||
84a09733 GT |
310 | static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev) |
311 | { | |
312 | struct mlx5e_priv *priv = netdev_priv(netdev); | |
313 | ||
314 | return mlx5e_ethtool_get_rxfh_key_size(priv); | |
315 | } | |
316 | ||
317 | static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) | |
318 | { | |
319 | struct mlx5e_priv *priv = netdev_priv(netdev); | |
320 | ||
321 | return mlx5e_ethtool_get_rxfh_indir_size(priv); | |
322 | } | |
323 | ||
ff9b85de OG |
/* ethtool .get_pauseparam (uplink rep only — pause is a physical-port
 * property): thin wrapper over the shared mlx5e helper.
 */
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	mlx5e_ethtool_get_pauseparam(netdev_priv(netdev), pauseparam);
}
331 | ||
/* ethtool .set_pauseparam (uplink rep only): thin wrapper over the
 * shared mlx5e helper.
 */
static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	return mlx5e_ethtool_set_pauseparam(netdev_priv(netdev), pauseparam);
}
339 | ||
/* ethtool .get_link_ksettings (uplink rep only): thin wrapper over the
 * shared mlx5e helper.
 */
static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	return mlx5e_ethtool_get_link_ksettings(netdev_priv(netdev),
						link_ksettings);
}
347 | ||
/* ethtool .set_link_ksettings (uplink rep only): thin wrapper over the
 * shared mlx5e helper.
 */
static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	return mlx5e_ethtool_set_link_ksettings(netdev_priv(netdev),
						link_ksettings);
}
355 | ||
/* ethtool ops for VF representors — no link/pause settings, those only
 * apply to the uplink (physical port) representor below.
 */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
371 | ||
/* ethtool ops for the uplink representor — superset of the VF-rep ops,
 * adding PCI bus info, link ksettings and pause parameters, which are
 * physical-port properties.
 */
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_uplink_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam    = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam    = mlx5e_uplink_rep_set_pauseparam,
};
391 | ||
6dcfa234 FF |
392 | static int mlx5e_rep_get_port_parent_id(struct net_device *dev, |
393 | struct netdev_phys_item_id *ppid) | |
cb67b832 | 394 | { |
7ff40a46 PB |
395 | struct mlx5_eswitch *esw; |
396 | struct mlx5e_priv *priv; | |
397 | u64 parent_id; | |
398 | ||
399 | priv = netdev_priv(dev); | |
400 | esw = priv->mdev->priv.eswitch; | |
cb67b832 | 401 | |
f6455de0 | 402 | if (esw->mode == MLX5_ESWITCH_NONE) |
cb67b832 HHZ |
403 | return -EOPNOTSUPP; |
404 | ||
7ff40a46 PB |
405 | parent_id = mlx5_query_nic_system_image_guid(priv->mdev); |
406 | ppid->id_len = sizeof(parent_id); | |
407 | memcpy(ppid->id, &parent_id, sizeof(parent_id)); | |
cb67b832 HHZ |
408 | |
409 | return 0; | |
410 | } | |
411 | ||
f7a68945 MB |
/* Tear down all send-to-vport (SQ re-inject) rules installed for @rep
 * and free their bookkeeping entries. No-op unless the e-switch is in
 * offloads mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
428 | ||
/* Install one send-to-vport rule per SQ in @sqns_array so traffic sent
 * on the representor's SQs is re-injected toward @rep's vport. Tracks
 * each rule in rpriv->vport_sqs_list for later teardown. On any failure
 * all rules installed so far are rolled back via mlx5e_sqs2vport_stop().
 * No-op (returns 0) unless the e-switch is in offloads mode.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}
468 | ||
/* Collect the SQ numbers of every (channel, tc) pair currently open on
 * this representor and install the matching send-to-vport rules.
 * Logs a warning and returns a negative errno on failure.
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* worst-case size: every channel carries every tc */
	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
497 | ||
cb67b832 HHZ |
498 | void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) |
499 | { | |
500 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | |
1d447a39 SM |
501 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
502 | struct mlx5_eswitch_rep *rep = rpriv->rep; | |
cb67b832 | 503 | |
f7a68945 | 504 | mlx5e_sqs2vport_stop(esw, rep); |
cb67b832 HHZ |
505 | } |
506 | ||
f6dfb4c3 HHZ |
/* Initialize the neighbour-stats polling interval to the smaller of the
 * IPv4 ARP and IPv6 ND DELAY_PROBE_TIME values, so counter sampling is
 * at least as frequent as the fastest neighbour probing. With IPv6
 * disabled the IPv6 interval degenerates to ~0UL so IPv4 always wins.
 */
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}
523 | ||
/* (Re)arm the delayed work that samples flow counters for neighbour
 * liveness tracking, using the interval computed at init time.
 */
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}
533 | ||
/* Periodic work: walk all tracked neighbour hash entries and refresh
 * their "used" state from HW flow counters, re-queueing itself while
 * any entry remains. The list walk is done under RTNL, which protects
 * neigh_list (see mlx5e_rep_indr_block_priv_lookup's ASSERT_RTNL use of
 * the same convention).
 */
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	/* re-arm first so the work keeps running while entries exist */
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}
551 | ||
232c0013 HHZ |
/* Take a reference on a neighbour hash entry. */
static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}
556 | ||
/* Drop a reference on a neighbour hash entry; frees it on last put. */
static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}
562 | ||
/* Reconcile the offloaded encap flows of @e with the neighbour state:
 * remove the flows when the neighbour went down or its HW address no
 * longer matches the cached one, and (re)install them with refreshed
 * MACs when the neighbour became valid. Must run under RTNL.
 */
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case that we delete
		 * the flows when encap source mac changed.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}
587 | ||
/* Deferred handler for a neighbour-update netevent: snapshot the
 * neighbour's HW address and validity under its lock, then walk the
 * encap entries hanging off the hash entry and update any whose offload
 * state disagrees with the neighbour. Drops the nhe reference and the
 * neighbour reference taken when the work was queued.
 */
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and it's hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		/* only touch entries whose state or cached MAC is stale */
		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}
627 | ||
f5bc2c5d OS |
/* Find the indirect-block context registered for @netdev, or NULL.
 * Caller must hold RTNL, which protects tc_indr_block_priv_list.
 */
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}
645 | ||
/* Unregister and free every remaining indirect-block context for this
 * uplink rep (teardown path).
 */
static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}
656 | ||
/* Dispatch a flower classifier command received through an indirect
 * block to the mlx5e TC machinery, in egress/eswitch-offload mode
 * (rules are installed on the uplink rep on behalf of @netdev).
 */
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
	int err = 0;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
682 | ||
683 | static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type, | |
684 | void *type_data, void *indr_priv) | |
685 | { | |
686 | struct mlx5e_rep_indr_block_priv *priv = indr_priv; | |
687 | ||
688 | switch (type) { | |
689 | case TC_SETUP_CLSFLOWER: | |
690 | return mlx5e_rep_indr_offload(priv->netdev, type_data, priv); | |
691 | default: | |
692 | return -EOPNOTSUPP; | |
693 | } | |
694 | } | |
695 | ||
955bcb6e PNA |
/* release callback invoked by the flow_block core when the block_cb is
 * freed: unlink and free our per-netdev context.
 */
static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

/* driver-wide list of flow_block_cb instances, for busy checks */
static LIST_HEAD(mlx5e_block_cb_list);
705 | ||
f5bc2c5d OS |
/* Bind/unbind an indirect TC ingress block on @netdev to this uplink
 * rep. On BIND: allocate a per-netdev context, allocate a flow_block_cb
 * around mlx5e_rep_indr_setup_block_cb, and register both on their
 * respective lists. On UNBIND: look both up and tear them down in the
 * reverse order. Only clsact ingress blocks are supported.
 */
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct flow_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		/* NOTE(review): indr_priv is NULL here, so this checks for a
		 * cb/ident pair of (setup_block_cb, NULL) on the driver list
		 * — confirm this is the intended busy test.
		 */
		if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
					  indr_priv, &mlx5e_block_cb_list))
			return -EBUSY;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
					       indr_priv, indr_priv,
					       mlx5e_rep_indr_tc_block_unbind);
		if (IS_ERR(block_cb)) {
			/* undo the list_add; unbind callback won't run */
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						mlx5e_rep_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		/* indr_priv itself is freed by the unbind callback */
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
769 | ||
770 | static | |
771 | int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, | |
772 | enum tc_setup_type type, void *type_data) | |
773 | { | |
774 | switch (type) { | |
775 | case TC_SETUP_BLOCK: | |
776 | return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv, | |
777 | type_data); | |
778 | default: | |
779 | return -EOPNOTSUPP; | |
780 | } | |
781 | } | |
782 | ||
/* Register this uplink rep as an indirect block owner for @netdev so TC
 * blocks bound on that device are routed to mlx5e_rep_indr_setup_tc_cb.
 * Logs on failure; returns the registration error.
 */
static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __tc_indr_block_cb_register(netdev, rpriv,
					  mlx5e_rep_indr_setup_tc_cb,
					  rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}
799 | ||
25f2d0e7 EB |
/* Counterpart of mlx5e_rep_indr_register_block(): drop the indirect
 * block registration for @netdev (rpriv is the cb identity).
 */
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
				      rpriv);
}
806 | ||
/* netdevice notifier: when a tunnel device we can offload for (or a VLAN
 * stacked on this uplink rep) is registered/unregistered, set up or tear
 * down its indirect TC block registration.
 */
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	/* ignore devices we can't offload for */
	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}
829 | ||
232c0013 HHZ |
830 | static struct mlx5e_neigh_hash_entry * |
831 | mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, | |
832 | struct mlx5e_neigh *m_neigh); | |
833 | ||
/* Netevent notifier for the representor's neighbour tracking.
 *
 * NETEVENT_NEIGH_UPDATE: if the neighbour matches a tracked hash entry,
 * hold references on both the neighbour and the entry and queue the
 * update work (references are dropped by the work, or here if the work
 * was already queued).
 *
 * NETEVENT_DELAY_PROBE_TIME_UPDATE: if the parms belong to a device we
 * track, shrink the flow-counter sampling interval so it never exceeds
 * the smallest per-device DELAY_PROBE_TIME seen so far.
 *
 * Runs in atomic (possibly softirq) context, hence spin_lock_bh
 * everywhere and no RTNL.
 */
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
		/* only IPv4 ARP and (when enabled) IPv6 ND are relevant */
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		/* work was already pending: drop the references taken above */
		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay prob time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		/* min_interval only ever shrinks; propagate it to HW */
		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}
931 | ||
37b498ff HHZ |
/* Hash table of tracked neighbours, keyed by the whole struct
 * mlx5e_neigh embedded in each mlx5e_neigh_hash_entry.
 */
static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};
938 | ||
/* Initialize the rep's neighbour tracking: hash table and list of
 * tracked entries, their lock, the periodic stats work, and the
 * netevent notifier that feeds updates in. Returns 0 or -errno; on
 * notifier registration failure the hash table is destroyed again.
 */
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}
964 | ||
/* Tear down neighbour tracking in reverse order of mlx5e_rep_neigh_init:
 * unregister the notifier first so no new update work can be queued,
 * then drain in-flight works before destroying the hash table.
 */
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}
978 | ||
979 | static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv, | |
980 | struct mlx5e_neigh_hash_entry *nhe) | |
981 | { | |
982 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
983 | int err; | |
984 | ||
985 | err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht, | |
986 | &nhe->rhash_node, | |
987 | mlx5e_neigh_ht_params); | |
988 | if (err) | |
989 | return err; | |
990 | ||
991 | list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list); | |
992 | ||
993 | return err; | |
994 | } | |
995 | ||
/* Unlink @nhe from both the list and the hash table under encap_lock,
 * so concurrent netevent handlers can no longer find it.
 */
static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}
1010 | ||
232c0013 HHZ |
1011 | /* This function must only be called under RTNL lock or under the |
1012 | * representor's encap_lock in case RTNL mutex can't be held. | |
1013 | */ | |
37b498ff HHZ |
1014 | static struct mlx5e_neigh_hash_entry * |
1015 | mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, | |
1016 | struct mlx5e_neigh *m_neigh) | |
1017 | { | |
1018 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
1019 | struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; | |
1020 | ||
1021 | return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh, | |
1022 | mlx5e_neigh_ht_params); | |
1023 | } | |
1024 | ||
232c0013 HHZ |
1025 | static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv, |
1026 | struct mlx5e_encap_entry *e, | |
1027 | struct mlx5e_neigh_hash_entry **nhe) | |
1028 | { | |
1029 | int err; | |
1030 | ||
1031 | *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL); | |
1032 | if (!*nhe) | |
1033 | return -ENOMEM; | |
1034 | ||
1035 | memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); | |
1036 | INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update); | |
1037 | INIT_LIST_HEAD(&(*nhe)->encap_list); | |
1038 | refcount_set(&(*nhe)->refcnt, 1); | |
1039 | ||
1040 | err = mlx5e_rep_neigh_entry_insert(priv, *nhe); | |
1041 | if (err) | |
1042 | goto out_free; | |
1043 | return 0; | |
1044 | ||
1045 | out_free: | |
1046 | kfree(*nhe); | |
1047 | return err; | |
1048 | } | |
1049 | ||
/* Unpublish @nhe and drop the creation reference; the struct itself is
 * freed when the last reference goes away.
 */
static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}
1062 | ||
/* Attach encap entry @e to its neighbour hash entry, creating the hash
 * entry on first use, and take a tunnel-entropy reference for the
 * encap's reformat type (dropped again on any failure).
 * NOTE(review): calls mlx5e_rep_neigh_entry_lookup() without encap_lock,
 * so per that function's contract this presumably runs under RTNL —
 * confirm at the call sites.
 */
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err) {
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}
1087 | ||
/* Detach encap entry @e from its neighbour hash entry, destroying the
 * hash entry when its encap list becomes empty, and drop the
 * tunnel-entropy reference taken at attach time.
 * NOTE(review): the lookup result is dereferenced without a NULL check;
 * this relies on the entry created at attach time still being present.
 */
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
1103 | ||
/* .ndo_open for a vport representor: open the channels under
 * state_lock, then set the eswitch vport admin state UP; carrier is
 * turned on only if the vport state change succeeds.
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
1126 | ||
/* .ndo_close for a vport representor: set the eswitch vport admin
 * state DOWN (result deliberately ignored), then close the channels.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
1143 | ||
de4784ca | 1144 | static int |
855afa09 | 1145 | mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, |
f9e30088 | 1146 | struct flow_cls_offload *cls_flower, int flags) |
d957b4e3 | 1147 | { |
8c818c27 | 1148 | switch (cls_flower->command) { |
f9e30088 | 1149 | case FLOW_CLS_REPLACE: |
71d82d2a OS |
1150 | return mlx5e_configure_flower(priv->netdev, priv, cls_flower, |
1151 | flags); | |
f9e30088 | 1152 | case FLOW_CLS_DESTROY: |
71d82d2a OS |
1153 | return mlx5e_delete_flower(priv->netdev, priv, cls_flower, |
1154 | flags); | |
f9e30088 | 1155 | case FLOW_CLS_STATS: |
71d82d2a OS |
1156 | return mlx5e_stats_flower(priv->netdev, priv, cls_flower, |
1157 | flags); | |
60bd4af8 OG |
1158 | default: |
1159 | return -EOPNOTSUPP; | |
1160 | } | |
1161 | } | |
1162 | ||
855afa09 JP |
1163 | static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, |
1164 | void *cb_priv) | |
1165 | { | |
1166 | struct mlx5e_priv *priv = cb_priv; | |
1167 | ||
1168 | switch (type) { | |
1169 | case TC_SETUP_CLSFLOWER: | |
d9ee0491 OG |
1170 | return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | |
1171 | MLX5E_TC_ESW_OFFLOAD); | |
855afa09 JP |
1172 | default: |
1173 | return -EOPNOTSUPP; | |
1174 | } | |
1175 | } | |
1176 | ||
3929502b VB |
/* Driver-wide list of flow block callbacks handed to
 * flow_block_cb_setup_simple() by mlx5e_rep_setup_tc().
 */
static LIST_HEAD(mlx5e_rep_block_cb_list);
1178 | ||
/* .ndo_setup_tc for representors: only TC block binding is supported;
 * per-command handling happens in mlx5e_rep_setup_tc_cb.
 */
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}
1194 | ||
370bad0f OG |
1195 | bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) |
1196 | { | |
1d447a39 SM |
1197 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
1198 | struct mlx5_eswitch_rep *rep; | |
1199 | ||
733d3e54 | 1200 | if (!MLX5_ESWITCH_MANAGER(priv->mdev)) |
1d447a39 | 1201 | return false; |
370bad0f | 1202 | |
d9ee0491 OG |
1203 | if (!rpriv) /* non vport rep mlx5e instances don't use this field */ |
1204 | return false; | |
370bad0f | 1205 | |
d9ee0491 | 1206 | rep = rpriv->rep; |
b05af6aa | 1207 | return (rep->vport == MLX5_VPORT_UPLINK); |
370bad0f OG |
1208 | } |
1209 | ||
13e509a4 | 1210 | static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) |
370bad0f | 1211 | { |
370bad0f OG |
1212 | switch (attr_id) { |
1213 | case IFLA_OFFLOAD_XSTATS_CPU_HIT: | |
370bad0f OG |
1214 | return true; |
1215 | } | |
1216 | ||
1217 | return false; | |
1218 | } | |
1219 | ||
/* Fold the software ring counters into @stats; always succeeds. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	mlx5e_fold_sw_stats64(netdev_priv(dev), stats);
	return 0;
}
1229 | ||
13e509a4 OG |
1230 | static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, |
1231 | void *sp) | |
370bad0f OG |
1232 | { |
1233 | switch (attr_id) { | |
1234 | case IFLA_OFFLOAD_XSTATS_CPU_HIT: | |
1235 | return mlx5e_get_sw_stats64(dev, sp); | |
1236 | } | |
1237 | ||
1238 | return -EINVAL; | |
1239 | } | |
1240 | ||
/* .ndo_get_stats64 for vport reps: report the cached vport counters and
 * kick an asynchronous refresh so the next query is fresher.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
1250 | ||
/* VF rep MTU change: no physical port behind it, so no port MTU hook */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
1255 | ||
/* Uplink rep MTU change also programs the physical port MTU */
static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}
1260 | ||
b36cdb42 | 1261 | static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) |
d9ee0491 | 1262 | { |
b36cdb42 OG |
1263 | struct sockaddr *saddr = addr; |
1264 | ||
1265 | if (!is_valid_ether_addr(saddr->sa_data)) | |
1266 | return -EADDRNOTAVAIL; | |
1267 | ||
1268 | ether_addr_copy(netdev->dev_addr, saddr->sa_data); | |
1269 | return 0; | |
d9ee0491 OG |
1270 | } |
1271 | ||
6ce966fd OG |
/* Legacy .ndo_set_vf_vlan is not supported in switchdev mode; only a
 * 0-vid request is accepted (as a no-op) so libvirt keeps working.
 */
static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}
1283 | ||
f60f315d PP |
/* .ndo_get_devlink_port: expose the devlink port embedded in the rep's
 * private data.
 */
static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}
1291 | ||
9b81d5a9 VP |
/* netdev ops for non-uplink vport representors */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
};
250a42b6 | 1303 | |
/* netdev ops for the uplink representor: regular open/close/stats plus
 * physical-port extras (MAC/MTU, vxlan ports, legacy per-VF controls).
 */
static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features        = mlx5e_set_features,
};
1325 | ||
a0646c88 EB |
1326 | bool mlx5e_eswitch_rep(struct net_device *netdev) |
1327 | { | |
9b81d5a9 | 1328 | if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || |
a0646c88 EB |
1329 | netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) |
1330 | return true; | |
1331 | ||
1332 | return false; | |
1333 | } | |
1334 | ||
/* Fill priv->channels.params for a representor netdev: MTU bounds, SQ
 * size (full-size for the uplink rep, smaller default for other reps),
 * RQ parameters, CQ moderation, inline mode and RSS.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
1372 | ||
/* Configure the rep net_device: ops/ethtool tables (uplink vs vport
 * rep), MAC address, and feature flags.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo  = 15 * HZ;

	netdev->features       |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features    |= NETIF_F_HW_TC;
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	/* vlan rx offload only on the uplink rep; other reps don't do vlan */
	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}
1416 | ||
182570b2 FD |
/* Profile .init for representors: base netdev/priv init, default
 * channel count, then rep-specific params/netdev setup and timestamping.
 */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}
1438 | ||
/* Profile .cleanup: undo mlx5e_netdev_init() done in mlx5e_init_rep() */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}
1443 | ||
84a09733 GT |
/* Create the rep's traffic-type classification (TTC) steering table,
 * steering each traffic type to its indirect TIR and anything else to
 * direct TIR 0. The inner-TTC part of the params is deliberately unset.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}
1465 | ||
/* Install the eswitch rule steering this vport's RX traffic into the
 * rep's direct TIR 0; the handle is kept for teardown.
 */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[0].tirn;
	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
1484 | ||
/* Profile .init_rx: build the rep RX pipeline bottom-up — drop RQ,
 * indirect+direct RQTs, indirect+direct TIRs, TTC table, and finally
 * the vport RX steering rule. Each failure unwinds everything created
 * so far via the goto chain (strict reverse order).
 */
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_ttc_table;

	return 0;

err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}
1538 | ||
/* Profile .cleanup_rx: tear down the RX pipeline in exact reverse order
 * of mlx5e_init_rep_rx().
 */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, false);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}
1551 | ||
/* Profile .init_tx: create the TISes; for the uplink rep additionally
 * set up the shared eswitch TC flow table, tunnel entropy state and the
 * netdevice notifier used for indirect block offload. Failures unwind
 * via the goto chain.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv;
	int tc, err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		uplink_priv = &rpriv->uplink_priv;

		INIT_LIST_HEAD(&uplink_priv->unready_flows);

		/* init shared tc flow table */
		err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
		if (err)
			goto destroy_tises;

		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

		/* init indirect block notifications */
		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
		err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
		if (err) {
			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
			goto tc_esw_cleanup;
		}
	}

	return 0;

tc_esw_cleanup:
	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
	return err;
}
1595 | ||
/* Profile .cleanup_tx: destroy the TISes and, for the uplink rep, the
 * indirect-block notifier state and the shared eswitch TC flow table.
 */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		/* clean indirect TC block notifications */
		unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
		mlx5e_rep_indr_clean_block_privs(rpriv);

		/* delete shared tc flow table */
		mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	}
}
1613 | ||
/* Profile .enable callback for non-uplink representors: only clamp the
 * netdev MTU boundaries to what the device supports.
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}
1618 | ||
a90f88fe GT |
/* Profile .update_rx callback: representors keep no RX state that needs
 * refreshing, so this is a successful no-op.
 */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
1623 | ||
b36cdb42 OG |
1624 | static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data) |
1625 | { | |
1626 | struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb); | |
b36cdb42 | 1627 | |
b4a23329 RD |
1628 | if (event == MLX5_EVENT_TYPE_PORT_CHANGE) { |
1629 | struct mlx5_eqe *eqe = data; | |
b36cdb42 | 1630 | |
b4a23329 RD |
1631 | switch (eqe->sub_type) { |
1632 | case MLX5_PORT_CHANGE_SUBTYPE_DOWN: | |
1633 | case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: | |
1634 | queue_work(priv->wq, &priv->update_carrier_work); | |
1635 | break; | |
1636 | default: | |
1637 | return NOTIFY_DONE; | |
1638 | } | |
1639 | ||
1640 | return NOTIFY_OK; | |
b36cdb42 OG |
1641 | } |
1642 | ||
b4a23329 RD |
1643 | if (event == MLX5_DEV_EVENT_PORT_AFFINITY) { |
1644 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | |
1645 | ||
1646 | queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work); | |
1647 | ||
1648 | return NOTIFY_OK; | |
1649 | } | |
1650 | ||
1651 | return NOTIFY_DONE; | |
b36cdb42 OG |
1652 | } |
1653 | ||
/* Profile .enable callback for the uplink representor.
 *
 * Derives the netdev MTU boundaries from the queried port maximum and
 * applies the current MTU to the port, then wires up uplink-only
 * machinery: the flow re-offload work, LAG, the async event notifier
 * and (when configured) DCB.
 */
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	/* must be initialized before the notifier registered below, which
	 * may queue this work on a PORT_AFFINITY event
	 */
	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
#endif
}
1677 | ||
/* Profile .disable callback for the uplink representor: undo
 * mlx5e_uplink_rep_enable() in reverse order. The notifier is
 * unregistered before the re-offload work is cancelled so that no new
 * work can be queued while we wait for it to finish.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
	mlx5_lag_remove(mdev);
}
1690 | ||
/* netdev profile used for PF/VF (non-uplink) vport representors */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_rep_enable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_rep_update_hw_counters,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= 1,	/* non-uplink reps expose a single TC */
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};
1706 | ||
b36cdb42 OG |
/* netdev profile used for the uplink vport representor; unlike the plain
 * rep profile it has enable/disable hooks, carrier updates, its own
 * stats updater and the full set of TCs.
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_uplink_rep_enable,
	.disable		= mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_uplink_rep_update_hw_counters,
	.update_carrier		= mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};
1724 | ||
f60f315d PP |
1725 | static bool |
1726 | is_devlink_port_supported(const struct mlx5_core_dev *dev, | |
1727 | const struct mlx5e_rep_priv *rpriv) | |
1728 | { | |
1729 | return rpriv->rep->vport == MLX5_VPORT_UPLINK || | |
1730 | rpriv->rep->vport == MLX5_VPORT_PF || | |
1731 | mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport); | |
1732 | } | |
1733 | ||
/* Register a devlink port for this representor, when its vport type has
 * one (see is_devlink_port_supported()). Port attributes encode the
 * flavour — physical for the uplink, PCI PF/VF otherwise — together with
 * the switch id obtained from the netdev's port parent id.
 *
 * Returns 0 both on success and when the vport simply has no devlink
 * port; a negative errno otherwise.
 */
static int register_devlink_port(struct mlx5_core_dev *dev,
				 struct mlx5e_rep_priv *rpriv)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct netdev_phys_item_id ppid = {};
	int ret;

	if (!is_devlink_port_supported(dev, rpriv))
		return 0;

	ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
	if (ret)
		return ret;

	if (rep->vport == MLX5_VPORT_UPLINK)
		devlink_port_attrs_set(&rpriv->dl_port,
				       DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       PCI_FUNC(dev->pdev->devfn), false, 0,
				       &ppid.id[0], ppid.id_len);
	else if (rep->vport == MLX5_VPORT_PF)
		devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn);
	else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
		/* NOTE(review): VF number is derived as vport - 1, which
		 * assumes VF vports are numbered consecutively starting
		 * right after the PF vport — confirm for this family.
		 */
		devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn,
					      rep->vport - 1);

	return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
}
1766 | ||
1767 | static void unregister_devlink_port(struct mlx5_core_dev *dev, | |
1768 | struct mlx5e_rep_priv *rpriv) | |
1769 | { | |
1770 | if (is_devlink_port_supported(dev, rpriv)) | |
1771 | devlink_port_unregister(&rpriv->dl_port); | |
1772 | } | |
1773 | ||
/* e-Switch vport representors */

/* REP_ETH .load callback: bring up the representor netdev for @rep.
 *
 * Sequence: allocate the rep-private context, create the netdev with the
 * uplink or plain rep profile, (uplink only) create the mdev resources,
 * attach the netdev, set up neighbour handling, register the devlink
 * port and finally register the netdev itself. Failures unwind in exact
 * reverse order through the goto chain below.
 */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	nch = mlx5e_get_max_num_channels(dev);
	/* the uplink vport gets the richer uplink profile */
	profile = (rep->vport == MLX5_VPORT_UPLINK) ?
		  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	/* only the uplink rep owns the shared mdev resources */
	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialized neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	err = register_devlink_port(dev, rpriv);
	if (err) {
		esw_warn(dev, "Failed to register devlink port %d\n",
			 rep->vport);
		goto err_neigh_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_devlink_cleanup;
	}

	/* publish the netdev on the devlink port only once fully registered */
	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_eth_set(&rpriv->dl_port, netdev);
	return 0;

err_devlink_cleanup:
	unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}
1861 | ||
/* REP_ETH .unload callback: tear down the representor netdev for @rep,
 * strictly reversing mlx5e_vport_rep_load().
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	/* saved up front: priv (and thus priv->ppriv) is freed by
	 * mlx5e_destroy_netdev() below
	 */
	void *ppriv = priv->ppriv;

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_clear(&rpriv->dl_port);
	unregister_netdev(netdev);
	unregister_devlink_port(dev, rpriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}
1882 | ||
22215908 MB |
1883 | static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) |
1884 | { | |
1885 | struct mlx5e_rep_priv *rpriv; | |
1886 | ||
1887 | rpriv = mlx5e_rep_to_rep_priv(rep); | |
1888 | ||
1889 | return rpriv->netdev; | |
1890 | } | |
1891 | ||
8693115a PP |
/* eswitch representor callbacks registered for the REP_ETH rep type */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};
1897 | ||
aec002f6 | 1898 | void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) |
1d447a39 | 1899 | { |
aec002f6 | 1900 | struct mlx5_eswitch *esw = mdev->priv.eswitch; |
1d447a39 | 1901 | |
8693115a | 1902 | mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH); |
1d447a39 SM |
1903 | } |
1904 | ||
aec002f6 | 1905 | void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) |
1d447a39 | 1906 | { |
1d447a39 | 1907 | struct mlx5_eswitch *esw = mdev->priv.eswitch; |
1d447a39 | 1908 | |
f8e8fa02 | 1909 | mlx5_eswitch_unregister_vport_reps(esw, REP_ETH); |
1d447a39 | 1910 | } |