Commit | Line | Data |
---|---|---|
48935bbb SM |
1 | /* |
2 | * Copyright (c) 2017, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
693dfd5a | 33 | #include <rdma/ib_verbs.h> |
48935bbb SM |
34 | #include <linux/mlx5/fs.h> |
35 | #include "en.h" | |
36 | #include "ipoib.h" | |
37 | ||
ec8fd927 SM |
38 | #define IB_DEFAULT_Q_KEY 0xb1b |
39 | ||
/* Forward declarations for the ndo callbacks registered below */
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_dev_init(struct net_device *dev);
static void mlx5i_dev_cleanup(struct net_device *dev);

/* Net device operations of the IPoIB mlx5 netdevice */
static const struct net_device_ops mlx5i_netdev_ops = {
	.ndo_open                = mlx5i_open,
	.ndo_stop                = mlx5i_close,
	.ndo_init                = mlx5i_dev_init,
	.ndo_uninit              = mlx5i_dev_cleanup,
};
51 | ||
48935bbb SM |
52 | /* IPoIB mlx5 netdev profile */ |
53 | ||
54 | /* Called directly after IPoIB netdevice was created to initialize SW structs */ | |
55 | static void mlx5i_init(struct mlx5_core_dev *mdev, | |
56 | struct net_device *netdev, | |
57 | const struct mlx5e_profile *profile, | |
58 | void *ppriv) | |
59 | { | |
60 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | |
61 | ||
8f493ffd SM |
62 | priv->mdev = mdev; |
63 | priv->netdev = netdev; | |
64 | priv->profile = profile; | |
65 | priv->ppriv = ppriv; | |
66 | ||
67 | mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); | |
68 | ||
69 | mutex_init(&priv->state_lock); | |
603f4a45 SM |
70 | |
71 | netdev->hw_features |= NETIF_F_SG; | |
72 | netdev->hw_features |= NETIF_F_IP_CSUM; | |
73 | netdev->hw_features |= NETIF_F_IPV6_CSUM; | |
74 | netdev->hw_features |= NETIF_F_GRO; | |
75 | netdev->hw_features |= NETIF_F_TSO; | |
76 | netdev->hw_features |= NETIF_F_TSO6; | |
77 | netdev->hw_features |= NETIF_F_RXCSUM; | |
78 | netdev->hw_features |= NETIF_F_RXHASH; | |
79 | ||
80 | netdev->netdev_ops = &mlx5i_netdev_ops; | |
48935bbb SM |
81 | } |
82 | ||
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
static void mlx5i_cleanup(struct mlx5e_priv *priv)
{
	/* Do nothing .. no per-netdev SW state needs explicit teardown here */
}
88 | ||
ec8fd927 SM |
89 | #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 |
90 | ||
91 | static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) | |
92 | { | |
93 | struct mlx5_qp_context *context = NULL; | |
94 | u32 *in = NULL; | |
95 | void *addr_path; | |
96 | int ret = 0; | |
97 | int inlen; | |
98 | void *qpc; | |
99 | ||
100 | inlen = MLX5_ST_SZ_BYTES(create_qp_in); | |
101 | in = mlx5_vzalloc(inlen); | |
102 | if (!in) | |
103 | return -ENOMEM; | |
104 | ||
105 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); | |
106 | MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); | |
107 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); | |
108 | MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, | |
109 | MLX5_QP_ENHANCED_ULP_STATELESS_MODE); | |
110 | ||
111 | addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path); | |
112 | MLX5_SET(ads, addr_path, port, 1); | |
113 | MLX5_SET(ads, addr_path, grh, 1); | |
114 | ||
115 | ret = mlx5_core_create_qp(mdev, qp, in, inlen); | |
116 | if (ret) { | |
117 | mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret); | |
118 | goto out; | |
119 | } | |
120 | ||
121 | /* QP states */ | |
122 | context = kzalloc(sizeof(*context), GFP_KERNEL); | |
123 | if (!context) { | |
124 | ret = -ENOMEM; | |
125 | goto out; | |
126 | } | |
127 | ||
128 | context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); | |
129 | context->pri_path.port = 1; | |
130 | context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); | |
131 | ||
132 | ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); | |
133 | if (ret) { | |
134 | mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); | |
135 | goto out; | |
136 | } | |
137 | memset(context, 0, sizeof(*context)); | |
138 | ||
139 | ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); | |
140 | if (ret) { | |
141 | mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); | |
142 | goto out; | |
143 | } | |
144 | ||
145 | ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); | |
146 | if (ret) { | |
147 | mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); | |
148 | goto out; | |
149 | } | |
150 | ||
151 | out: | |
152 | kfree(context); | |
153 | kvfree(in); | |
154 | return ret; | |
155 | } | |
156 | ||
/* Destroy the underlay UD QP created by mlx5i_create_underlay_qp() */
static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
	mlx5_core_destroy_qp(mdev, qp);
}
161 | ||
48935bbb SM |
162 | static int mlx5i_init_tx(struct mlx5e_priv *priv) |
163 | { | |
5426a0b2 SM |
164 | struct mlx5i_priv *ipriv = priv->ppriv; |
165 | int err; | |
166 | ||
ec8fd927 SM |
167 | err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); |
168 | if (err) { | |
169 | mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err); | |
170 | return err; | |
171 | } | |
5426a0b2 SM |
172 | |
173 | err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); | |
174 | if (err) { | |
175 | mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); | |
176 | return err; | |
177 | } | |
178 | ||
48935bbb SM |
179 | return 0; |
180 | } | |
181 | ||
/* Destroy TX resources in reverse order of creation: the TIS first, then
 * the underlay QP it was bound to.
 */
static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
	struct mlx5i_priv *ipriv = priv->ppriv;

	mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}
189 | ||
bc81b9d3 SM |
190 | static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) |
191 | { | |
192 | struct mlx5i_priv *ipriv = priv->ppriv; | |
193 | int err; | |
194 | ||
195 | priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, | |
196 | MLX5_FLOW_NAMESPACE_KERNEL); | |
197 | ||
198 | if (!priv->fs.ns) | |
199 | return -EINVAL; | |
200 | ||
201 | err = mlx5e_arfs_create_tables(priv); | |
202 | if (err) { | |
203 | netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", | |
204 | err); | |
205 | priv->netdev->hw_features &= ~NETIF_F_NTUPLE; | |
206 | } | |
207 | ||
208 | err = mlx5e_create_ttc_table(priv, ipriv->qp.qpn); | |
209 | if (err) { | |
210 | netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", | |
211 | err); | |
212 | goto err_destroy_arfs_tables; | |
213 | } | |
214 | ||
215 | return 0; | |
216 | ||
217 | err_destroy_arfs_tables: | |
218 | mlx5e_arfs_destroy_tables(priv); | |
219 | ||
220 | return err; | |
221 | } | |
222 | ||
/* Tear down flow steering in reverse order of mlx5i_create_flow_steering() */
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
}
228 | ||
/* Create RX HW resources: RQTs (indirect then direct), TIRs (indirect then
 * direct) and the flow steering tables. On failure everything created so
 * far is unwound in reverse order via the goto ladder below.
 * Returns 0 on success or a negative errno.
 */
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5i_create_flow_steering(priv);
	if (err)
		goto err_destroy_direct_tirs;

	return 0;

err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}
265 | ||
/* Destroy RX resources in exact reverse order of mlx5i_init_rx() */
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5i_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}
274 | ||
/* IPoIB profile plugged into the common mlx5e netdev machinery.
 * Hooks not yet implemented for IPoIB are left NULL.
 */
static const struct mlx5e_profile mlx5i_nic_profile = {
	.init		   = mlx5i_init,
	.cleanup	   = mlx5i_cleanup,
	.init_tx	   = mlx5i_init_tx,
	.cleanup_tx	   = mlx5i_cleanup_tx,
	.init_rx	   = mlx5i_init_rx,
	.cleanup_rx	   = mlx5i_cleanup_rx,
	.enable		   = NULL, /* mlx5i_enable */
	.disable	   = NULL, /* mlx5i_disable */
	.update_stats	   = NULL, /* mlx5i_update_stats */
	.max_nch	   = mlx5e_get_max_num_channels,
	.rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
	.max_tc		   = MLX5I_MAX_NUM_TC,
};
290 | ||
603f4a45 SM |
291 | /* mlx5i netdev NDos */ |
292 | ||
293 | static int mlx5i_dev_init(struct net_device *dev) | |
294 | { | |
295 | struct mlx5e_priv *priv = mlx5i_epriv(dev); | |
296 | struct mlx5i_priv *ipriv = priv->ppriv; | |
297 | ||
298 | /* Set dev address using underlay QP */ | |
299 | dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff; | |
300 | dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff; | |
301 | dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff; | |
302 | ||
303 | return 0; | |
304 | } | |
305 | ||
306 | static void mlx5i_dev_cleanup(struct net_device *dev) | |
307 | { | |
ec8fd927 SM |
308 | struct mlx5e_priv *priv = mlx5i_epriv(dev); |
309 | struct mlx5_core_dev *mdev = priv->mdev; | |
310 | struct mlx5i_priv *ipriv = priv->ppriv; | |
311 | struct mlx5_qp_context context; | |
312 | ||
313 | /* detach qp from flow-steering by reset it */ | |
314 | mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp); | |
603f4a45 SM |
315 | } |
316 | ||
317 | static int mlx5i_open(struct net_device *netdev) | |
318 | { | |
319 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | |
320 | int err; | |
321 | ||
322 | mutex_lock(&priv->state_lock); | |
323 | ||
324 | set_bit(MLX5E_STATE_OPENED, &priv->state); | |
325 | ||
326 | err = mlx5e_open_channels(priv, &priv->channels); | |
327 | if (err) | |
328 | goto err_clear_state_opened_flag; | |
329 | ||
330 | mlx5e_refresh_tirs(priv, false); | |
331 | mlx5e_activate_priv_channels(priv); | |
332 | mutex_unlock(&priv->state_lock); | |
333 | return 0; | |
334 | ||
335 | err_clear_state_opened_flag: | |
336 | clear_bit(MLX5E_STATE_OPENED, &priv->state); | |
337 | mutex_unlock(&priv->state_lock); | |
338 | return err; | |
339 | } | |
340 | ||
341 | static int mlx5i_close(struct net_device *netdev) | |
342 | { | |
343 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | |
344 | ||
345 | /* May already be CLOSED in case a previous configuration operation | |
346 | * (e.g RX/TX queue size change) that involves close&open failed. | |
347 | */ | |
348 | mutex_lock(&priv->state_lock); | |
349 | ||
350 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) | |
351 | goto unlock; | |
352 | ||
353 | clear_bit(MLX5E_STATE_OPENED, &priv->state); | |
354 | ||
355 | netif_carrier_off(priv->netdev); | |
356 | mlx5e_deactivate_priv_channels(priv); | |
357 | mlx5e_close_channels(&priv->channels); | |
358 | unlock: | |
359 | mutex_unlock(&priv->state_lock); | |
360 | return 0; | |
361 | } | |
362 | ||
48935bbb | 363 | /* IPoIB RDMA netdev callbacks */ |
a7082ef0 | 364 | static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca, |
693dfd5a ES |
365 | union ib_gid *gid, u16 lid, int set_qkey, |
366 | u32 qkey) | |
ec8fd927 SM |
367 | { |
368 | struct mlx5e_priv *epriv = mlx5i_epriv(netdev); | |
369 | struct mlx5_core_dev *mdev = epriv->mdev; | |
370 | struct mlx5i_priv *ipriv = epriv->ppriv; | |
371 | int err; | |
372 | ||
373 | mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); | |
374 | err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn); | |
375 | if (err) | |
376 | mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n", | |
377 | ipriv->qp.qpn, gid->raw); | |
378 | ||
693dfd5a ES |
379 | if (set_qkey) { |
380 | mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n", | |
381 | netdev->name, qkey); | |
382 | ipriv->qkey = qkey; | |
383 | } | |
384 | ||
ec8fd927 SM |
385 | return err; |
386 | } | |
387 | ||
a7082ef0 SH |
388 | static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca, |
389 | union ib_gid *gid, u16 lid) | |
ec8fd927 SM |
390 | { |
391 | struct mlx5e_priv *epriv = mlx5i_epriv(netdev); | |
392 | struct mlx5_core_dev *mdev = epriv->mdev; | |
393 | struct mlx5i_priv *ipriv = epriv->ppriv; | |
394 | int err; | |
395 | ||
396 | mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw); | |
397 | ||
398 | err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn); | |
399 | if (err) | |
400 | mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n", | |
401 | ipriv->qp.qpn, gid->raw); | |
402 | ||
403 | return err; | |
404 | } | |
48935bbb | 405 | |
a7082ef0 | 406 | static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb, |
693dfd5a | 407 | struct ib_ah *address, u32 dqpn) |
25854544 SM |
408 | { |
409 | struct mlx5e_priv *epriv = mlx5i_epriv(dev); | |
410 | struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)]; | |
411 | struct mlx5_ib_ah *mah = to_mah(address); | |
693dfd5a | 412 | struct mlx5i_priv *ipriv = epriv->ppriv; |
25854544 | 413 | |
693dfd5a | 414 | return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey); |
25854544 SM |
415 | } |
416 | ||
48935bbb SM |
417 | static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) |
418 | { | |
419 | if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) | |
420 | return -EOPNOTSUPP; | |
421 | ||
422 | if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { | |
423 | mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n"); | |
693dfd5a | 424 | return -EOPNOTSUPP; |
48935bbb SM |
425 | } |
426 | ||
427 | return 0; | |
428 | } | |
429 | ||
/* Allocate an rdma_netdev (IPoIB) on top of an mlx5 device: allocates the
 * netdev, creates per-mdev resources, attaches the mlx5e profile and fills
 * in the rdma_netdev callback pointers.
 *
 * NOTE(review): error returns are inconsistent — the capability check
 * returns ERR_PTR(-EOPNOTSUPP) while every later failure returns NULL.
 * Callers must handle both forms; confirm against callers before unifying.
 */
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	const struct mlx5e_profile *profile = &mlx5i_nic_profile;
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5i_priv *ipriv;
	struct mlx5e_priv *epriv;
	struct rdma_netdev *rn;
	int err;

	if (mlx5i_check_required_hca_cap(mdev)) {
		mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* This function should only be called once per mdev */
	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return NULL;

	/* One TX queue per (channel, TC) pair, one RX queue per channel */
	netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
				  name, NET_NAME_UNKNOWN,
				  setup,
				  nch * MLX5E_MAX_NUM_TC,
				  nch);
	if (!netdev) {
		mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
		goto free_mdev_resources;
	}

	ipriv = netdev_priv(netdev);
	epriv = mlx5i_epriv(netdev);

	epriv->wq = create_singlethread_workqueue("mlx5i");
	if (!epriv->wq)
		goto err_free_netdev;

	profile->init(mdev, netdev, profile, ipriv);

	mlx5e_attach_netdev(epriv);
	netif_carrier_off(netdev);

	/* set rdma_netdev func pointers */
	rn = &ipriv->rn;
	rn->hca = ibdev;
	rn->send = mlx5i_xmit;
	rn->attach_mcast = mlx5i_attach_mcast;
	rn->detach_mcast = mlx5i_detach_mcast;

	return netdev;

err_free_netdev:
	free_netdev(netdev);
free_mdev_resources:
	mlx5e_destroy_mdev_resources(mdev);

	return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
492 | ||
693dfd5a | 493 | void mlx5_rdma_netdev_free(struct net_device *netdev) |
48935bbb SM |
494 | { |
495 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | |
496 | const struct mlx5e_profile *profile = priv->profile; | |
497 | ||
498 | mlx5e_detach_netdev(priv); | |
499 | profile->cleanup(priv); | |
500 | destroy_workqueue(priv->wq); | |
501 | free_netdev(netdev); | |
502 | ||
503 | mlx5e_destroy_mdev_resources(priv->mdev); | |
504 | } | |
505 | EXPORT_SYMBOL(mlx5_rdma_netdev_free); | |
506 |