drivers/net/ethernet/mellanox/mlx5/core/dev.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "devlink.h"
#include "lag/lag.h"

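/*
 * This IDA backs mlx5_adev_idx_alloc()/mlx5_adev_idx_free(). The index it
 * hands out is used as the auxiliary device id (dev->priv.adev_idx) in
 * add_adev(), so sibling devices get distinct names (e.g. "mlx5_core.eth.0",
 * "mlx5_core.eth.1").
 */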
static DEFINE_IDA(mlx5_adev_ida);

static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

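/*
 * mlx5_eth_supported() verifies the minimal HCA capabilities the Ethernet
 * ("eth") auxiliary device depends on. Missing hard requirements are reported
 * with mlx5_core_warn() and fail the check; the last two checks only warn and
 * do not disqualify the device.
 */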
bool mlx5_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

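/*
 * The virtio-net emulation ("vnet") auxiliary device is only exposed for
 * non-PF functions that advertise the VIRTIO_NET_Q general object together
 * with QP event mode and an ethernet frame offload type.
 */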
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

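/*
 * is_vnet_enabled() and is_ib_enabled() below read the generic devlink
 * driverinit parameters (enable_vnet / enable_rdma); when the parameter
 * cannot be read, the corresponding device is treated as disabled.
 */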
static bool is_vnet_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(dev),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
					      &val);
	return err ? false : val.vbool;
}

static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

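/*
 * mlx5_rdma_supported() selects the plain "rdma" device only when neither the
 * "rdma-rep" nor the "multiport" variant applies, so at most one RDMA flavour
 * is created per function.
 */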
bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

static bool is_ib_enabled(struct mlx5_core_dev *dev)
{
	union devlink_param_value val;
	int err;

	err = devl_param_driverinit_value_get(priv_to_devlink(dev),
					      DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
					      &val);
	return err ? false : val.vbool;
}

static bool is_dpll_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_DPLL))
		return false;

	if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
		mlx5_core_dbg(dev, "Missing SyncE capability\n");
		return false;
	}

	return true;
}

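/*
 * The enum below also fixes the probe order: mlx5_attach_device() and
 * add_drivers() walk mlx5_adev_devices[] in ascending index order, while
 * mlx5_detach_device() and delete_drivers() tear devices down in reverse.
 */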
enum {
	MLX5_INTERFACE_PROTOCOL_ETH,
	MLX5_INTERFACE_PROTOCOL_ETH_REP,

	MLX5_INTERFACE_PROTOCOL_IB,
	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,

	MLX5_INTERFACE_PROTOCOL_VNET,

	MLX5_INTERFACE_PROTOCOL_DPLL,
};

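/*
 * Each entry in this table becomes an auxiliary device named
 * "<module>.<suffix>.<id>" (e.g. "mlx5_core.eth.0") that the corresponding
 * auxiliary driver binds to. Entries without an .is_enabled() hook are
 * created whenever .is_supported() passes.
 */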
static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
	bool (*is_enabled)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &mlx5_vnet_supported,
					   .is_enabled = &is_vnet_enabled },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &mlx5_rdma_supported,
					 .is_enabled = &is_ib_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &mlx5_eth_supported,
					  .is_enabled = &mlx5_core_is_eth_enabled },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
	[MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
					   .is_supported = &is_dpll_supported },
};

int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

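/*
 * add_adev() follows the usual auxiliary bus two-step: auxiliary_device_init()
 * sets up the refcount and release callback (adev_release() above), then
 * auxiliary_device_add() publishes the device so a matching driver can probe.
 * Note the asymmetric error handling: before init succeeds the wrapper is
 * kfree()d directly, afterwards it must be dropped via
 * auxiliary_device_uninit() so that adev_release() does the freeing.
 */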
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

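/*
 * A "lightweight" device has MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV set: the next
 * rescan removes all of its auxiliary devices and does not create new ones,
 * leaving only the core PCI function itself.
 */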
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
{
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
{
	return dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
}

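/*
 * mlx5_attach_device() is the counterpart of mlx5_detach_device(): auxiliary
 * devices that were kept across a detach are resumed through their auxiliary
 * driver's ->resume() callback, missing ones are created from scratch. The
 * caller must hold the devlink instance lock.
 */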
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	devl_assert_locked(priv_to_devlink(dev));
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_enabled) {
				bool enabled;

				enabled = mlx5_adev_devices[i].is_enabled(dev);
				if (!enabled)
					continue;
			}

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;

			/* Note that the bound driver here is the auxiliary
			 * driver of this auxiliary device, not the PCI driver
			 * that mlx5_core_dev is attached to.
			 */
			if (!adev->dev.driver)
				continue;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);

			break;
		}
	}
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	return ret;
}

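/*
 * With suspend == true, auxiliary drivers that implement ->suspend() are
 * suspended in place and their devices are kept for a later
 * mlx5_attach_device(); everything else is deleted. The caller must hold the
 * devlink instance lock.
 */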
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	devl_assert_locked(priv_to_devlink(dev));
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto skip_suspend;
		}

		adev = &priv->adev[i]->adev;
		/* The auxiliary driver was unbound manually through sysfs */
		if (!adev->dev.driver)
			goto skip_suspend;

		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend && suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

skip_suspend:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

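/*
 * mlx5_register_device()/mlx5_unregister_device() flip
 * MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV and rescan, which creates or removes the
 * auxiliary devices accordingly. A failed registration is rolled back by
 * unregistering again.
 */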
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	devl_assert_locked(priv_to_devlink(dev));
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	devl_assert_locked(priv_to_devlink(dev));
	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}

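/*
 * add_drivers()/delete_drivers() are the two halves of
 * mlx5_rescan_drivers_locked(): delete_drivers() removes devices that are no
 * longer enabled or supported (or all of them when
 * MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV is set), add_drivers() creates whatever is
 * supported, enabled and not present yet.
 */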
static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled &&
		    !(mlx5_adev_devices[i].is_enabled(dev)))
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Keep rescanning the remaining drivers and leave it
			 * to the caller to decide whether to release
			 * everything or continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_enabled) {
			bool enabled;

			enabled = mlx5_adev_devices[i].is_enabled(dev);
			if (!enabled)
				goto del_adev;
		}

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

del_adev:
		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* This function is used after mlx5_core_dev is reconfigured, to bring the set
 * of auxiliary devices back in sync with the new configuration.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
		return 0;

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

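/*
 * Two mlx5 core devices belong to the same physical device (e.g. two PFs of
 * the same NIC) when both report the same non-zero system image GUID.
 */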
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
	u64 fsystem_guid, psystem_guid;

	fsystem_guid = mlx5_query_nic_system_image_guid(dev);
	psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);

	return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}