net/mlx5: Lag, use hash when in roce lag on 4 ports
linux-block.git: drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/netdevice.h>
#include "lag.h"

enum {
	MLX5_LAG_FT_LEVEL_TTC,
	MLX5_LAG_FT_LEVEL_INNER_TTC,
	MLX5_LAG_FT_LEVEL_DEFINER,
};

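/*
 * The port selection table spreads traffic with a single hash-split
 * flow group: the device hashes the packet fields described by
 * @definer and picks one of the MLX5_MAX_PORTS flow entries.
 */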
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
			    struct mlx5_flow_definer *definer)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}

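/*
 * Build one port selection flow table for a definer: a MLX5_MAX_PORTS
 * entry table in the PORT_SEL namespace, the hash-split group above,
 * and a per-entry rule forwarding to the uplink of the currently
 * mapped port (port1 for the first entry, port2 for the second).
 */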
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
					  struct mlx5_lag_definer *lag_definer,
					  u8 port1, u8 port2)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	int err, i;

	ft_attr.max_fte = MLX5_MAX_PORTS;
	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	if (!ns) {
		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
		return -EOPNOTSUPP;
	}

	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(lag_definer->ft)) {
		mlx5_core_warn(dev, "Failed to create port selection table\n");
		return PTR_ERR(lag_definer->ft);
	}

	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
						      lag_definer->definer);
	if (IS_ERR(lag_definer->fg)) {
		err = PTR_ERR(lag_definer->fg);
		goto destroy_ft;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		u8 affinity = i == 0 ? port1 : port2;

		dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
						  vhca_id);
		lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
							    NULL, &flow_act,
							    &dest, 1);
		if (IS_ERR(lag_definer->rules[i])) {
			err = PTR_ERR(lag_definer->rules[i]);
			while (i--)
				mlx5_del_flow_rules(lag_definer->rules[i]);
			goto destroy_fg;
		}
	}

	return 0;

destroy_fg:
	mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
	mlx5_destroy_flow_table(lag_definer->ft);
	return err;
}

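/*
 * Pick a match definer format and build its mask for hashing on inner
 * (post-decap) headers: L4 ports plus IP addresses for TCP/UDP, L2
 * addresses plus IP addresses for other IPv4/IPv6 traffic, and L2
 * fields alone for everything else. Returns the hardware format id.
 */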
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
				      enum mlx5_traffic_types tt)
{
	int format_id;
	u8 *ipv6;

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 31;
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 32;
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_15_0);
		break;
	default:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		break;
	}

	return format_id;
}

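/*
 * Outer-header variant of the above; tunneled traffic types are
 * delegated to mlx5_lag_set_definer_inner(). In the non-IP default
 * case the mask covers the source MAC plus either the first VLAN id
 * (NETDEV_LAG_HASH_VLAN_SRCMAC) or the ethertype and destination MAC.
 */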
static int mlx5_lag_set_definer(u32 *match_definer_mask,
				enum mlx5_traffic_types tt, bool tunnel,
				enum netdev_lag_hash hash)
{
	int format_id;
	u8 *ipv6;

	if (tunnel)
		return mlx5_lag_set_definer_inner(match_definer_mask, tt);

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 29;
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 30;
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_15_0);
		break;
	default:
		format_id = 0;
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_15_0);

		if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
			MLX5_SET_TO_ONES(match_definer_format_0,
					 match_definer_mask,
					 outer_first_vlan_vid);
			break;
		}

		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_ethertype);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_15_0);
		break;
	}

	return format_id;
}

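/*
 * Create the full per-traffic-type construct: the match definer built
 * from the mask, then the hash-based port selection table that uses
 * it. On success the temporary mask buffer is freed and the new
 * lag_definer is returned.
 */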
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
			enum mlx5_traffic_types tt, bool tunnel, u8 port1,
			u8 port2)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_definer *lag_definer;
	u32 *match_definer_mask;
	int format_id, err;

	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
	if (!lag_definer)
		return ERR_PTR(-ENOMEM);

	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
							match_mask),
				      GFP_KERNEL);
	if (!match_definer_mask) {
		err = -ENOMEM;
		goto free_lag_definer;
	}

	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
	lag_definer->definer =
		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					  format_id, match_definer_mask);
	if (IS_ERR(lag_definer->definer)) {
		err = PTR_ERR(lag_definer->definer);
		goto free_mask;
	}

	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
	if (err)
		goto destroy_match_definer;

	kvfree(match_definer_mask);

	return lag_definer;

destroy_match_definer:
	mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
	kvfree(match_definer_mask);
free_lag_definer:
	kfree(lag_definer);
	return ERR_PTR(err);
}

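/* Tear down everything mlx5_lag_create_definer() built, in reverse order. */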
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
				     struct mlx5_lag_definer *lag_definer)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		mlx5_del_flow_rules(lag_definer->rules[i]);
	mlx5_destroy_flow_group(lag_definer->fg);
	mlx5_destroy_flow_table(lag_definer->ft);
	mlx5_destroy_match_definer(dev, lag_definer->definer);
	kfree(lag_definer);
}

static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		if (port_sel->outer.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->outer.definers[tt]);
		if (port_sel->inner.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->inner.definers[tt]);
	}
}

static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
				    enum netdev_lag_hash hash_type,
				    u8 port1, u8 port2)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_lag_definer *lag_definer;
	int tt, err;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
						      false, port1, port2);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->outer.definers[tt] = lag_definer;

		if (!port_sel->tunnel)
			continue;

		lag_definer =
			mlx5_lag_create_definer(ldev, hash_type, tt,
						true, port1, port2);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->inner.definers[tt] = lag_definer;
	}

	return 0;

destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

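/*
 * Translate the bonding xmit hash policy into the traffic types that
 * get their own definer; the E23/E34 (encapsulated) policies also
 * enable hashing on the inner headers of tunneled traffic.
 */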
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
		       enum netdev_lag_hash hash)
{
	port_sel->tunnel = false;

	switch (hash) {
	case NETDEV_LAG_HASH_E34:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L34:
		set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	case NETDEV_LAG_HASH_E23:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L23:
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	default:
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	}
}

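/*
 * Every traffic type without a definer is flagged in ignore_dests so
 * the TTC table is created without a destination for it.
 */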
#define SET_IGNORE_DESTS_BITS(tt_map, dests)			\
	do {							\
		int idx;					\
								\
		for_each_clear_bit(idx, tt_map, MLX5_NUM_TT)	\
			set_bit(idx, dests);			\
	} while (0)

static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns = mlx5_get_flow_namespace(dev,
						 MLX5_FLOW_NAMESPACE_PORT_SEL);
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
}

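/*
 * The outer TTC table classifies by traffic type and jumps to the
 * matching definer's hash table; with tunneling enabled, all tunnel
 * traffic types are sent on to the inner TTC table instead.
 */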
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns = mlx5_get_flow_namespace(dev,
						 MLX5_FLOW_NAMESPACE_PORT_SEL);
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);

	ttc_params->inner_ttc = port_sel->tunnel;
	if (!port_sel->tunnel)
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(port_sel->inner.ttc);
	}
}

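/* The outer TTC table is the entry point of the port selection steering. */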
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
	if (IS_ERR(port_sel->outer.ttc))
		return PTR_ERR(port_sel->outer.ttc);

	return 0;
}

static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
	port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
	if (IS_ERR(port_sel->inner.ttc))
		return PTR_ERR(port_sel->inner.ttc);

	return 0;
}

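/*
 * Set up hash-based port selection for the bond: choose the traffic
 * types from the hash policy, create a definer and hash table per
 * type, then the TTC table(s) that steer traffic into them.
 */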
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
			     enum netdev_lag_hash hash_type, u8 port1, u8 port2)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	set_tt_map(port_sel, hash_type);
	err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
	if (err)
		return err;

	if (port_sel->tunnel) {
		err = mlx5_lag_create_inner_ttc_table(ldev);
		if (err)
			goto destroy_definers;
	}

	err = mlx5_lag_create_ttc_table(ldev);
	if (err)
		goto destroy_inner;

	return 0;

destroy_inner:
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

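/*
 * Re-point each definer's per-port rules at the uplink given by the
 * new port affinity, skipping ports whose mapping is unchanged.
 */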
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
				      struct mlx5_lag_definer **definers,
				      u8 port1, u8 port2)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_destination dest = {};
	int err;
	int tt;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		struct mlx5_flow_handle **rules = definers[tt]->rules;

		if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
			dest.vport.vhca_id =
				MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
			err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
							   &dest, NULL);
			if (err)
				return err;
		}

		if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
			dest.vport.vhca_id =
				MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
			err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
							   &dest, NULL);
			if (err)
				return err;
		}
	}

	return 0;
}

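/*
 * Apply a port affinity change (e.g. after a link failure) to the
 * outer definers and, for tunneled policies, the inner ones too.
 */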
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	err = mlx5_lag_modify_definers_destinations(ldev,
						    port_sel->outer.definers,
						    port1, port2);
	if (err)
		return err;

	if (!port_sel->tunnel)
		return 0;

	return mlx5_lag_modify_definers_destinations(ldev,
						     port_sel->inner.definers,
						     port1, port2);
}

void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;

	mlx5_destroy_ttc_table(port_sel->outer.ttc);
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
	mlx5_lag_destroy_definers(ldev);
	memset(port_sel, 0, sizeof(*port_sel));
}