drivers/net/ethernet/mellanox/mlx5/core/vport.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);

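/* Low-level helpers for the QUERY/MODIFY_VPORT_STATE commands; a non-zero
 * vport number targets another vport through the other_vport bit.
 */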
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

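/* Resolve the effective minimum WQE inline mode: either fixed by the
 * per-protocol capability or read from vport 0's NIC vport context.
 */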
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

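/* The 6-byte MAC sits at a two-byte offset inside the permanent_address
 * field, hence the &out_addr[2] copy here and &perm_mac[2] below.
 */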
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u8 *out_addr;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

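/* Query the allowed unicast/multicast MAC list of a vport; the requested
 * size is clamped to the device's log_max_current_{uc,mc}_list capability.
 */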
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

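/* Program the allowed VLAN list. VLAN entries reuse the allowed-list slots
 * of the NIC vport context (hence the current_uc_mac_address[] addressing)
 * but are written in vlan_layout format.
 */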
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

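/* Setting the node GUID is only permitted for other vports and requires the
 * vport_group_manager capability.
 */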
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

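/* HCA (IB) vport queries. A gid_index/pkey_index of 0xffff requests the
 * whole table rather than a single entry.
 */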
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

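/* Local loopback control: "enable" clears the disable_{mc,uc}_local_lb bits,
 * touching only the bits the device capabilities allow to be modified.
 */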
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

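/* RoCE enablement is reference counted; the NIC vport context is only
 * modified on the 0 <-> 1 transitions, under mlx5_roce_en_lock.
 */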
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

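/* Query the vport counter set; when reading another vport on behalf of a VF,
 * the vport number used is vf + 1.
 */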
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

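/* Read the receive/transmit packets discarded while the vport was down,
 * reported by the QUERY_VNIC_ENV command.
 */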
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}

int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);

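/* Affiliate a port's NIC vport with the master function for multiport
 * operation: RoCE is enabled first and rolled back if the modify fails.
 */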
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id,
		 MLX5_CAP_GEN(master_mdev, vhca_id));
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));

	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
	if (err)
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);

int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliated_vhca_id, 0);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria, 0);

	err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
	if (!err)
		mlx5_nic_vport_disable_roce(port_mdev);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);

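/* Return the system image GUID, caching it in mdev->sys_image_guid; the
 * source is the NIC or HCA vport context depending on the port type.
 */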
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	u64 tmp = 0;

	if (mdev->sys_image_guid)
		return mdev->sys_image_guid;

	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
		mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
	else
		mlx5_query_hca_vport_system_image_guid(mdev, &tmp);

	mdev->sys_image_guid = tmp;

	return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev:	Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of vports for
 * the eswitch.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
}
EXPORT_SYMBOL(mlx5_eswitch_get_total_vports);