/* drivers/net/ethernet/mellanox/mlx5/core/vport.c */

/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);

static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);

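/*
 * Illustrative sketch (not part of the upstream file): how an eswitch
 * manager might flip the administrative state of another vport and read
 * it back.  The opmod and admin-state encodings come from the device
 * interface (mlx5_ifc) in real callers; they are passed in here so the
 * sketch does not hard-code any particular value.
 */
static inline int mlx5_vport_admin_state_toggle_example(struct mlx5_core_dev *mdev,
							u8 opmod, u16 vport,
							u8 new_state)
{
	int err;

	err = mlx5_modify_vport_admin_state(mdev, opmod, vport, new_state);
	if (err)
		return err;

	/* Read back what the device actually latched. */
	if (mlx5_query_vport_admin_state(mdev, opmod, vport) != new_state)
		return -EIO;

	return 0;
}
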
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}

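/*
 * Illustrative sketch (not part of the upstream file): when the device
 * reports MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, an eswitch manager could
 * mirror its own minimum inline mode into a VF vport so the VF builds
 * WQEs the hardware will accept.  The helper name and the policy of
 * copying vport 0's mode are assumptions made for the example.
 */
static inline int mlx5_example_propagate_min_inline(struct mlx5_core_dev *mdev,
						    u16 vf_vport)
{
	u8 mode = 0;

	mlx5_query_min_inline(mdev, &mode);
	return mlx5_modify_nic_vport_min_inline(mdev, vf_vport, mode);
}
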
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (!err)
		ether_addr_copy(addr, &out_addr[2]);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);

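/*
 * Illustrative sketch (not part of the upstream file): the permanent
 * address sits at a two byte offset inside the 8-byte field, which the
 * helpers above already hide from callers.  A netdev setup path might
 * use them like this; falling back to a random MAC when the device
 * reports an unusable address is an assumption of the example.
 */
static inline int mlx5_example_init_mac(struct mlx5_core_dev *mdev, u16 vport)
{
	u8 mac[ETH_ALEN];
	int err;

	err = mlx5_query_nic_vport_mac_address(mdev, vport, mac);
	if (err)
		return err;

	if (!is_valid_ether_addr(mac)) {
		eth_random_addr(mac);
		err = mlx5_modify_nic_vport_mac_address(mdev, vport, mac);
	}
	return err;
}
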
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);

int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);

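/*
 * Illustrative sketch (not part of the upstream file): the MTU stored in
 * the NIC vport context is the device-level MTU; the Ethernet driver
 * converts to and from the stack's MTU elsewhere.  The example simply
 * writes a new device MTU and checks the value the device latched.
 */
static inline int mlx5_example_set_hw_mtu(struct mlx5_core_dev *mdev, u16 hw_mtu)
{
	u16 readback = 0;
	int err;

	err = mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	if (err)
		return err;

	err = mlx5_query_nic_vport_mtu(mdev, &readback);
	if (!err && readback != hw_mtu)
		err = -EINVAL;
	return err;
}
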
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u32 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

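/*
 * Illustrative sketch (not part of the upstream file): the list helpers
 * above take a flat array of ETH_ALEN-byte addresses and cap its length
 * by the log_max_current_{uc,mc}_list capabilities.  A caller syncing a
 * netdev's unicast filter might look roughly like this; the truncation
 * policy is an assumption of the example.
 */
static inline int mlx5_example_push_uc_list(struct mlx5_core_dev *dev,
					    u8 addrs[][ETH_ALEN], int n_addrs)
{
	int max = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);

	if (n_addrs > max)
		n_addrs = max;	/* silently truncate for the example */

	return mlx5_modify_nic_vport_mac_list(dev, MLX5_NVPRT_LIST_TYPE_UC,
					      addrs, n_addrs);
}
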
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u32 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

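/*
 * Illustrative sketch (not part of the upstream file): VLAN filters reuse
 * the current_uc_mac_address[] slots in the vport context, which is why
 * the helpers above index that array with a vlan_layout.  Allowing a
 * couple of VLAN IDs could look like this; the particular IDs are just
 * example values.
 */
static inline int mlx5_example_allow_two_vlans(struct mlx5_core_dev *dev)
{
	u16 vids[2] = { 10, 20 };	/* example VLAN IDs */

	return mlx5_modify_nic_vport_vlans(dev, vids, ARRAY_SIZE(vids));
}
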
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -EOPNOTSUPP;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

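/*
 * Illustrative sketch (not part of the upstream file): only an eswitch
 * manager may rewrite another vport's node GUID, and the function above
 * already refuses vport 0 and unsupported devices.  Deriving the GUID
 * from a MAC address here is purely an example policy.
 */
static inline int mlx5_example_set_vf_node_guid(struct mlx5_core_dev *mdev,
						u32 vf_vport, const u8 *mac)
{
	u64 guid = 0;

	/* Pack the 6-byte MAC into the 64-bit GUID (example encoding only). */
	memcpy(&guid, mac, ETH_ALEN);
	return mlx5_modify_nic_vport_node_guid(mdev, vf_vport, guid);
}
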
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	mlx5_query_nic_vport_context(mdev, 0, out, outlen);

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

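/*
 * Illustrative sketch (not part of the upstream file): GID entries follow
 * the fixed part of the query_hca_vport_gid output, which is why the
 * function above reads them at MLX5_ST_SZ_BYTES(query_hca_vport_gid_out).
 * Fetching the first GID of port 1 for the local vport would look like
 * this.
 */
static inline int mlx5_example_query_local_gid0(struct mlx5_core_dev *dev,
						union ib_gid *gid)
{
	/* other_vport = 0: query our own vport; gid_index 0, port 1 */
	return mlx5_query_hca_vport_gid(dev, 0, 1, 0, 0, gid);
}
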
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto ex;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);

ex:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*sys_image_guid = rep->sys_image_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
	if (!err)
		*node_guid = rep->node_guid;

	kfree(rep);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u32 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

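/*
 * Illustrative sketch (not part of the upstream file): the three promisc
 * knobs are independent, so an rx-mode handler typically writes all of
 * them in one call.  Mapping "promisc" to every knob being set is an
 * example policy, not a rule imposed by the firmware interface.
 */
static inline int mlx5_example_set_rx_mode(struct mlx5_core_dev *mdev, bool promisc)
{
	return mlx5_modify_nic_vport_promisc(mdev, promisc, promisc, promisc);
}
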
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};

int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.disable_mc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	*status = !value;

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

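/*
 * Illustrative sketch (not part of the upstream file): local loopback is
 * reported as enabled only when both the UC and MC disable bits are
 * clear, which is what the !value above encodes.  A self-test that needs
 * loopback could save and restore the state like this.
 */
static inline int mlx5_example_with_local_lb(struct mlx5_core_dev *mdev)
{
	bool was_enabled = false;
	int err;

	err = mlx5_nic_vport_query_local_lb(mdev, &was_enabled);
	if (err)
		return err;

	err = mlx5_nic_vport_update_local_lb(mdev, true);
	if (err)
		return err;

	/* ... run the loopback-dependent work here ... */

	return mlx5_nic_vport_update_local_lb(mdev, was_enabled);
}
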
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED = 1,
};

static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

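/*
 * Illustrative sketch (not part of the upstream file): enable/disable
 * calls are reference counted under mlx5_roce_en_lock, so every
 * successful enable must be paired with exactly one disable.  An RDMA
 * consumer would typically wrap its setup and teardown like this.
 */
static inline int mlx5_example_roce_user(struct mlx5_core_dev *mdev)
{
	int err;

	err = mlx5_nic_vport_enable_roce(mdev);
	if (err)
		return err;

	/* ... bring up RoCE-dependent resources here ... */

	mlx5_nic_vport_disable_roce(mdev);
	return 0;
}
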
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

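/*
 * Illustrative sketch (not part of the upstream file): the caller owns
 * the output buffer, which must be at least
 * MLX5_ST_SZ_BYTES(query_vport_counter_out).  VF counters are read by an
 * eswitch manager with other_vport = 1 and the VF index (the function
 * above translates it to vport number vf + 1); parsing individual
 * counters out of the reply is left to the caller.
 */
static inline int mlx5_example_read_vf_counters(struct mlx5_core_dev *dev, int vf)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_vport_counter(dev, 1, vf, 1, out, out_sz);

	/* ... MLX5_GET64() the counters of interest from "out" here ... */

	kvfree(out);
	return err;
}
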
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
	int is_group_manager;
	void *in;
	int err;
	void *ctx;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	memset(out, 0, sizeof(out));
	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
ex:
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);