drivers/net/ethernet/mellanox/mlx5/core/lag.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

enum {
	MLX5_LAG_FLAG_BONDED = 1 << 0,
};

struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
	enum netdev_lag_tx_type            tx_type;
	struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
	bool is_bonded;
};

/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
	u8                    flags;
	u8                    v2p_map[MLX5_MAX_PORTS];
	struct lag_func       pf[MLX5_MAX_PORTS];
	struct lag_tracker    tracker;
	struct delayed_work   bond_work;
	struct notifier_block nb;

	/* Admin state. Allow lag only if allowed is true
	 * even if network conditions for lag were met
	 */
	bool                  allowed;
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);

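/* Thin wrappers around the CREATE_LAG, MODIFY_LAG and DESTROY_LAG firmware
 * commands. remap_port1/remap_port2 are written into the
 * tx_remap_affinity_1/2 fields of the LAG context and select which
 * physical port carries the traffic of each logical port.
 */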
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
					struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;
}

static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

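/* Derive the virtual-to-physical (v2p) port mapping from the tracked bond
 * state: in active-backup mode both logical ports follow the tx-enabled
 * slave; otherwise traffic is spread 1:1/2:2 unless one link is down, in
 * which case everything is steered to the remaining port.
 */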
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		if (tracker->netdev_state[0].tx_enabled) {
			*port1 = 1;
			*port2 = 1;
		} else {
			*port1 = 2;
			*port2 = 2;
		}
	} else {
		*port1 = 1;
		*port2 = 2;
		if (!tracker->netdev_state[0].link_up)
			*port1 = 2;
		else if (!tracker->netdev_state[1].link_up)
			*port2 = 1;
	}
}

static void mlx5_activate_lag(struct mlx5_lag *ldev,
			      struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags |= MLX5_LAG_FLAG_BONDED;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
				       &ldev->v2p_map[1]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
}

static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;

	err = mlx5_cmd_destroy_lag(dev0);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to destroy LAG (%d)\n",
			      err);
}

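/* Re-evaluate the bonding state and reconcile the hardware with it:
 * create the LAG (and a single IB device on PF0) when bonding is both
 * requested and allowed, update the port affinity mapping while already
 * bonded, or tear the LAG down and restore per-PF IB devices otherwise.
 */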
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
	struct lag_tracker tracker;
	u8 v2p_port1, v2p_port2;
	int i, err;
	bool do_bond;

	if (!dev0 || !dev1)
		return;

	mutex_lock(&lag_mutex);
	tracker = ldev->tracker;
	mutex_unlock(&lag_mutex);

	do_bond = tracker.is_bonded && ldev->allowed;

	if (do_bond && !mlx5_lag_is_bonded(ldev)) {
		for (i = 0; i < MLX5_MAX_PORTS; i++)
			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
						    MLX5_INTERFACE_PROTOCOL_IB);

		mlx5_activate_lag(ldev, &tracker);

		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_enable_roce(dev1);
	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
					       &v2p_port2);

		if ((v2p_port1 != ldev->v2p_map[0]) ||
		    (v2p_port2 != ldev->v2p_map[1])) {
			ldev->v2p_map[0] = v2p_port1;
			ldev->v2p_map[1] = v2p_port2;

			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
			if (err)
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
		}
	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_disable_roce(dev1);

		mlx5_deactivate_lag(ldev);

		for (i = 0; i < MLX5_MAX_PORTS; i++)
			if (ldev->pf[i].dev)
				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
							 MLX5_INTERFACE_PROTOCOL_IB);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	schedule_delayed_work(&ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mlx5_dev_list_unlock();
}

static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx > -1)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 * Lag mode must be activebackup or hash.
	 */
	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
		    (bond_status == 0x3) &&
		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx == -1)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

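/* netdevice notifier callback: update the LAG tracker from
 * CHANGEUPPER/CHANGELOWERSTATE events and schedule bond_work if the
 * bonding state we care about has changed.
 */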
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

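/* LAG is not allowed while SR-IOV is enabled on either PF. */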
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
	    (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
		return false;
	else
		return true;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
	ldev->allowed = mlx5_lag_check_prereq(ldev);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	kfree(ldev);
}

static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	ldev->allowed = mlx5_lag_check_prereq(ldev);
	dev->priv.lag = ldev;

	mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	ldev->allowed = mlx5_lag_check_prereq(ldev);
	mutex_unlock(&lag_mutex);
}

/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (mlx5_lag_is_bonded(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && mlx5_lag_is_bonded(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

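/* Set the admin state (ldev->allowed) and re-run the bond logic, so that
 * an active LAG is torn down when forbidden, or created when allowed and
 * the netdev conditions are already met.
 */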
static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
{
	struct mlx5_lag *ldev;
	int ret = 0;
	bool lag_active;

	mlx5_dev_list_lock();

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev) {
		ret = -ENODEV;
		goto unlock;
	}
	lag_active = mlx5_lag_is_bonded(ldev);
	if (!mlx5_lag_check_prereq(ldev) && allow) {
		ret = -EINVAL;
		goto unlock;
	}
	if (ldev->allowed == allow)
		goto unlock;
	ldev->allowed = allow;
	if ((lag_active && !allow) || allow)
		mlx5_do_bond(ldev);
unlock:
	mlx5_dev_list_unlock();
	return ret;
}

int mlx5_lag_forbid(struct mlx5_core_dev *dev)
{
	return mlx5_lag_set_state(dev, false);
}

int mlx5_lag_allow(struct mlx5_core_dev *dev)
{
	return mlx5_lag_set_state(dev, true);
}

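/* Return the netdev currently carrying RoCE traffic for this device:
 * the tx-enabled slave in active-backup mode, otherwise PF0's netdev.
 * A reference is taken on the returned netdev; NULL if not bonded.
 */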
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && mlx5_lag_is_bonded(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

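/* Decide whether an mlx5 interface should be added for this core device:
 * while bonded, only PF0 gets an IB device, so IB is refused for the
 * other PF.
 */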
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
						 priv);
	struct mlx5_lag *ldev;

	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
		return true;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
		return true;

	/* If bonded, we do not add an IB device for PF1. */
	return false;
}