treewide: kmalloc() -> kmalloc_array()
[linux-2.6-block.git] / drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID			(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
	const char *func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
	/* VF DMFS mbox with port flipped */
	void *mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32 mirr_mbox_size;
	struct list_head mirr_list;
	u64 mirr_rule_id;
};

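/*
 * All tracked resources live in per-type red-black trees keyed by
 * res_id, so the master can resolve ownership in O(log n) regardless
 * of how many objects its VFs have allocated.
 */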
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
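/*
 * Quota accounting: a slave can always consume up to its guaranteed
 * share (drawn from the reserved pool); beyond that, a request must
 * fit in the shared free pool without dipping into the amount still
 * reserved for other functions' guarantees.
 */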
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
				(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
	return;
}

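/*
 * Default quotas: each function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and may allocate up to
 * half of num_instances on top of that guarantee; the PF additionally
 * absorbs the device's reserved MTTs.
 */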
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}

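/*
 * Builds the master's tracking state: the per-slave resource lists,
 * the per-type rb-trees and the quota/guarantee tables for the PF and
 * all VFs.
 */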
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
						 sizeof(int),
						 GFP_KERNEL);
		res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
						      sizeof(int),
						      GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for */
					/* both ports.			      */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

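/*
 * In wrapped QP commands the mailbox carries the QP context at offset
 * 8; the two helpers below rewrite the slave-relative pkey and GID
 * indexes stored there into the physical indexes the device expects.
 */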
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

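/*
 * Applies the PF-administered vport state (VST vlan, QoS, spoof check,
 * link state) to a QP context a VF is about to program, and binds the
 * QP to a counter for per-VF statistics.
 */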
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP:
		return "QP";
	case RES_CQ:
		return "CQ";
	case RES_SRQ:
		return "SRQ";
	case RES_XRCD:
		return "XRCD";
	case RES_MPT:
		return "MPT";
	case RES_MTT:
		return "MTT";
	case RES_MAC:
		return "MAC";
	case RES_VLAN:
		return "VLAN";
	case RES_COUNTER:
		return "COUNTER";
	case RES_FS_RULE:
		return "FS_RULE";
	case RES_EQ:
		return "EQ";
	default:
		return "INVALID RESOURCE";
	}
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

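/*
 * _get_res()/put_res() form a per-resource busy lock: the resource is
 * marked RES_ANY_BUSY (recording the locker's function name for the
 * mlx4_warn() diagnostic) and put_res() restores the previous state.
 * A sketch of the usage pattern seen throughout this file:
 *
 *	struct res_qp *qp;
 *
 *	err = get_res(dev, slave, qpn, RES_QP, &qp);
 *	if (err)
 *		return err;
 *	... operate on qp ...
 *	put_res(dev, slave, qpn, RES_QP);
 */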
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type,
		    void *res, const char *func_name)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	r->func_name = func_name;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

#define get_res(dev, slave, res_id, type, res) \
	_get_res((dev), (slave), (res_id), (type), (res), __func__)

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r) {
		r->state = r->from_state;
		r->func_name = "";
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

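/*
 * Counter policy for wrapped QP commands: a QPC that already names a
 * real counter is only validated and claimed; otherwise a counter this
 * slave already owns on the port is reused, or a new one is allocated,
 * with the sink counter as the fallback when none is available.
 */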
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

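/*
 * Sums the statistics of every counter @slave owns on @port into
 * @data; if any individual query fails the partial sums are discarded.
 */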
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

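/*
 * add_res_range()/rem_res_range() move whole [base, base + count)
 * ranges into and out of the tracker, undoing partial insertions so
 * the rb-tree and the per-slave list never go out of sync.
 */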
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

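/*
 * The *_res_start_move_to() helpers below validate a state transition
 * and park the resource in its BUSY state; the caller commits the move
 * with res_end_move() or rolls it back with res_abort_move().
 */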
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	if (!err && eq)
		*eq = r;

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

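/*
 * Allocation command handlers, one per resource type. Each charges the
 * slave's quota through mlx4_grant_resource() before touching the
 * device and releases the grant again on every failure path, so the
 * quota state stays consistent on errors.
 */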
1776static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1777 u64 in_param, u64 *out_param)
1778{
1779 int err;
1780 int count;
1781 int align;
1782 int base;
1783 int qpn;
ddae0349 1784 u8 flags;
c82e9aa0
EC
1785
1786 switch (op) {
1787 case RES_OP_RESERVE:
2d5c57d7 1788 count = get_param_l(&in_param) & 0xffffff;
ddae0349
EE
1789 /* Turn off all unsupported QP allocation flags that the
1790 * slave tries to set.
1791 */
1792 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
c82e9aa0 1793 align = get_param_h(&in_param);
146f3ef4 1794 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1795 if (err)
1796 return err;
1797
ddae0349 1798 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
146f3ef4
JM
1799 if (err) {
1800 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1801 return err;
1802 }
1803
c82e9aa0
EC
1804 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1805 if (err) {
146f3ef4 1806 mlx4_release_resource(dev, slave, RES_QP, count, 0);
c82e9aa0
EC
1807 __mlx4_qp_release_range(dev, base, count);
1808 return err;
1809 }
1810 set_param_l(out_param, base);
1811 break;
1812 case RES_OP_MAP_ICM:
1813 qpn = get_param_l(&in_param) & 0x7fffff;
1814 if (valid_reserved(dev, slave, qpn)) {
1815 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1816 if (err)
1817 return err;
1818 }
1819
1820 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1821 NULL, 1);
1822 if (err)
1823 return err;
1824
54679e14 1825 if (!fw_reserved(dev, qpn)) {
8900b894 1826 err = __mlx4_qp_alloc_icm(dev, qpn);
c82e9aa0
EC
1827 if (err) {
1828 res_abort_move(dev, slave, RES_QP, qpn);
1829 return err;
1830 }
1831 }
1832
1833 res_end_move(dev, slave, RES_QP, qpn);
1834 break;
1835
1836 default:
1837 err = -EINVAL;
1838 break;
1839 }
1840 return err;
1841}
1842
1843static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1844 u64 in_param, u64 *out_param)
1845{
1846 int err = -EINVAL;
1847 int base;
1848 int order;
1849
1850 if (op != RES_OP_RESERVE_AND_MAP)
1851 return err;
1852
1853 order = get_param_l(&in_param);
146f3ef4
JM
1854
1855 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1856 if (err)
1857 return err;
1858
c82e9aa0 1859 base = __mlx4_alloc_mtt_range(dev, order);
146f3ef4
JM
1860 if (base == -1) {
1861 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1862 return -ENOMEM;
146f3ef4 1863 }
c82e9aa0
EC
1864
1865 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
146f3ef4
JM
1866 if (err) {
1867 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
c82e9aa0 1868 __mlx4_free_mtt_range(dev, base, order);
146f3ef4 1869 } else {
c82e9aa0 1870 set_param_l(out_param, base);
146f3ef4 1871 }
c82e9aa0
EC
1872
1873 return err;
1874}
1875
1876static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1877 u64 in_param, u64 *out_param)
1878{
1879 int err = -EINVAL;
1880 int index;
1881 int id;
1882 struct res_mpt *mpt;
1883
1884 switch (op) {
1885 case RES_OP_RESERVE:
146f3ef4
JM
1886 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1887 if (err)
1888 break;
1889
b20e519a 1890 index = __mlx4_mpt_reserve(dev);
146f3ef4
JM
1891 if (index == -1) {
1892 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
c82e9aa0 1893 break;
146f3ef4 1894 }
c82e9aa0
EC
1895 id = index & mpt_mask(dev);
1896
1897 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1898 if (err) {
146f3ef4 1899 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
b20e519a 1900 __mlx4_mpt_release(dev, index);
c82e9aa0
EC
1901 break;
1902 }
1903 set_param_l(out_param, index);
1904 break;
1905 case RES_OP_MAP_ICM:
1906 index = get_param_l(&in_param);
1907 id = index & mpt_mask(dev);
1908 err = mr_res_start_move_to(dev, slave, id,
1909 RES_MPT_MAPPED, &mpt);
1910 if (err)
1911 return err;
1912
8900b894 1913 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
c82e9aa0
EC
1914 if (err) {
1915 res_abort_move(dev, slave, RES_MPT, id);
1916 return err;
1917 }
1918
1919 res_end_move(dev, slave, RES_MPT, id);
1920 break;
1921 }
1922 return err;
1923}
1924
1925static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1926 u64 in_param, u64 *out_param)
1927{
1928 int cqn;
1929 int err;
1930
1931 switch (op) {
1932 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1933 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1934 if (err)
1935 break;
1936
146f3ef4
JM
1937 err = __mlx4_cq_alloc_icm(dev, &cqn);
1938 if (err) {
1939 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1940 break;
1941 }
1942
c82e9aa0
EC
1943 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1944 if (err) {
146f3ef4 1945 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
c82e9aa0
EC
1946 __mlx4_cq_free_icm(dev, cqn);
1947 break;
1948 }
1949
1950 set_param_l(out_param, cqn);
1951 break;
1952
1953 default:
1954 err = -EINVAL;
1955 }
1956
1957 return err;
1958}
1959
1960static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1961 u64 in_param, u64 *out_param)
1962{
1963 int srqn;
1964 int err;
1965
1966 switch (op) {
1967 case RES_OP_RESERVE_AND_MAP:
146f3ef4 1968 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1969 if (err)
1970 break;
1971
146f3ef4
JM
1972 err = __mlx4_srq_alloc_icm(dev, &srqn);
1973 if (err) {
1974 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1975 break;
1976 }
1977
c82e9aa0
EC
1978 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1979 if (err) {
146f3ef4 1980 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
c82e9aa0
EC
1981 __mlx4_srq_free_icm(dev, srqn);
1982 break;
1983 }
1984
1985 set_param_l(out_param, srqn);
1986 break;
1987
1988 default:
1989 err = -EINVAL;
1990 }
1991
1992 return err;
1993}
1994
1995static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1996 u8 smac_index, u64 *mac)
1997{
1998 struct mlx4_priv *priv = mlx4_priv(dev);
1999 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2000 struct list_head *mac_list =
2001 &tracker->slave_list[slave].res_list[RES_MAC];
2002 struct mac_res *res, *tmp;
2003
2004 list_for_each_entry_safe(res, tmp, mac_list, list) {
2005 if (res->smac_index == smac_index && res->port == (u8) port) {
2006 *mac = res->mac;
2007 return 0;
2008 }
2009 }
2010 return -ENOENT;
2011}
2012
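/*
 * MAC entries are reference counted per (mac, port) pair: registering
 * an address the slave already holds only bumps ref_count, and only
 * the first registration consumes a RES_MAC quota unit.
 */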
2013static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2014{
2015 struct mlx4_priv *priv = mlx4_priv(dev);
2016 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2017 struct list_head *mac_list =
2018 &tracker->slave_list[slave].res_list[RES_MAC];
2019 struct mac_res *res, *tmp;
2020
2021 list_for_each_entry_safe(res, tmp, mac_list, list) {
2022 if (res->mac == mac && res->port == (u8) port) {
2023 /* mac found. update ref count */
2024 ++res->ref_count;
2025 return 0;
2026 }
2027 }
2028
2029 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2030 return -EINVAL;
2031 res = kzalloc(sizeof(*res), GFP_KERNEL);
2032 if (!res) {
2033 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2034 return -ENOMEM;
2035 }
2036 res->mac = mac;
2037 res->port = (u8) port;
2038 res->smac_index = smac_index;
2039 res->ref_count = 1;
2040 list_add_tail(&res->list,
2041 &tracker->slave_list[slave].res_list[RES_MAC]);
2042 return 0;
2043}
2044
2045static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2046 int port)
2047{
2048 struct mlx4_priv *priv = mlx4_priv(dev);
2049 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2050 struct list_head *mac_list =
2051 &tracker->slave_list[slave].res_list[RES_MAC];
2052 struct mac_res *res, *tmp;
2053
2054 list_for_each_entry_safe(res, tmp, mac_list, list) {
2055 if (res->mac == mac && res->port == (u8) port) {
2056 if (!--res->ref_count) {
2057 list_del(&res->list);
2058 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2059 kfree(res);
2060 }
2061 break;
2062 }
2063 }
2064}
2065
2066static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2067{
2068 struct mlx4_priv *priv = mlx4_priv(dev);
2069 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2070 struct list_head *mac_list =
2071 &tracker->slave_list[slave].res_list[RES_MAC];
2072 struct mac_res *res, *tmp;
2073 int i;
2074
2075 list_for_each_entry_safe(res, tmp, mac_list, list) {
2076 list_del(&res->list);
2077 /* unregister the mac as many times as the slave referenced it */
2078 for (i = 0; i < res->ref_count; i++)
2079 __mlx4_unregister_mac(dev, res->port, res->mac);
2080 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2081 kfree(res);
2082 }
2083}
2084
2085static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2086 u64 in_param, u64 *out_param, int in_port)
2087{
2088 int err = -EINVAL;
2089 int port;
2090 u64 mac;
2091 u8 smac_index;
2092
2093 if (op != RES_OP_RESERVE_AND_MAP)
2094 return err;
2095
2096 port = !in_port ? get_param_l(out_param) : in_port;
2097 port = mlx4_slave_convert_port(
2098 dev, slave, port);
2099
2100 if (port < 0)
2101 return -EINVAL;
2102 mac = in_param;
2103
2104 err = __mlx4_register_mac(dev, port, mac);
2105 if (err >= 0) {
2106 smac_index = err;
2107 set_param_l(out_param, err);
2108 err = 0;
2109 }
2110
2111 if (!err) {
2112 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2113 if (err)
2114 __mlx4_unregister_mac(dev, port, mac);
2115 }
2116 return err;
2117}
2118
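/*
 * VLAN tracking below mirrors the MAC tracking above: one refcounted
 * vlan_res entry per (vlan, port) pair on the slave's RES_VLAN list,
 * with the HW vlan_index cached for later unregistration.
 */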
2119 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2120 int port, int vlan_index)
2121 {
2122 struct mlx4_priv *priv = mlx4_priv(dev);
2123 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2124 struct list_head *vlan_list =
2125 &tracker->slave_list[slave].res_list[RES_VLAN];
2126 struct vlan_res *res, *tmp;
2127
2128 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2129 if (res->vlan == vlan && res->port == (u8) port) {
2130 /* vlan found. update ref count */
2131 ++res->ref_count;
2132 return 0;
2133 }
2134 }
2135
2136 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2137 return -EINVAL;
2138 res = kzalloc(sizeof(*res), GFP_KERNEL);
2139 if (!res) {
2140 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2141 return -ENOMEM;
2142 }
2143 res->vlan = vlan;
2144 res->port = (u8) port;
2145 res->vlan_index = vlan_index;
2146 res->ref_count = 1;
2147 list_add_tail(&res->list,
2148 &tracker->slave_list[slave].res_list[RES_VLAN]);
2149 return 0;
2150}
2151
2152
2153static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2154 int port)
2155{
2156 struct mlx4_priv *priv = mlx4_priv(dev);
2157 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2158 struct list_head *vlan_list =
2159 &tracker->slave_list[slave].res_list[RES_VLAN];
2160 struct vlan_res *res, *tmp;
2161
2162 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2163 if (res->vlan == vlan && res->port == (u8) port) {
2164 if (!--res->ref_count) {
2165 list_del(&res->list);
2166 mlx4_release_resource(dev, slave, RES_VLAN,
2167 1, port);
2168 kfree(res);
2169 }
2170 break;
2171 }
2172 }
2173}
2174
2175static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2176{
2177 struct mlx4_priv *priv = mlx4_priv(dev);
2178 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2179 struct list_head *vlan_list =
2180 &tracker->slave_list[slave].res_list[RES_VLAN];
2181 struct vlan_res *res, *tmp;
2182 int i;
2183
2184 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2185 list_del(&res->list);
2186 /* unregister the vlan as many times as the slave referenced it */
2187 for (i = 0; i < res->ref_count; i++)
2188 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2189 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2190 kfree(res);
2191 }
2192}
2193
2194static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2195 u64 in_param, u64 *out_param, int in_port)
2196 {
2197 struct mlx4_priv *priv = mlx4_priv(dev);
2198 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2199 int err;
2200 u16 vlan;
2201 int vlan_index;
2202 int port;
2203
2204 port = !in_port ? get_param_l(out_param) : in_port;
2205
2206 if (!port || op != RES_OP_RESERVE_AND_MAP)
2207 return -EINVAL;
2208
2209 port = mlx4_slave_convert_port(
2210 dev, slave, port);
2211
2212 if (port < 0)
2213 return -EINVAL;
2214 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2215 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2216 slave_state[slave].old_vlan_api = true;
2217 return 0;
2218 }
2219
2220 vlan = (u16) in_param;
2221
2222 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2223 if (!err) {
2224 set_param_l(out_param, (u32) vlan_index);
2225 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2226 if (err)
2227 __mlx4_unregister_vlan(dev, port, vlan);
2228 }
2229 return err;
2230}
2231
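/*
 * Counters are charged against a device-wide quota (port argument 0 to
 * mlx4_grant_resource()), but the owning port is still recorded in the
 * tracker entry via the last add_res_range() argument.
 */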
2232 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2233 u64 in_param, u64 *out_param, int port)
2234{
2235 u32 index;
2236 int err;
2237
2238 if (op != RES_OP_RESERVE)
2239 return -EINVAL;
2240
2241 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2242 if (err)
2243 return err;
2244
2245 err = __mlx4_counter_alloc(dev, &index);
2246 if (err) {
2247 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2248 return err;
2249 }
2250
2251 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2252 if (err) {
2253 __mlx4_counter_free(dev, index);
2254 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2255 } else {
2256 set_param_l(out_param, index);
2257 }
2258
2259 return err;
2260}
2261
2262static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2263 u64 in_param, u64 *out_param)
2264{
2265 u32 xrcdn;
2266 int err;
2267
2268 if (op != RES_OP_RESERVE)
2269 return -EINVAL;
2270
2271 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2272 if (err)
2273 return err;
2274
2275 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2276 if (err)
2277 __mlx4_xrcd_free(dev, xrcdn);
2278 else
2279 set_param_l(out_param, xrcdn);
2280
2281 return err;
2282}
2283
2284int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2285 struct mlx4_vhcr *vhcr,
2286 struct mlx4_cmd_mailbox *inbox,
2287 struct mlx4_cmd_mailbox *outbox,
2288 struct mlx4_cmd_info *cmd)
2289{
2290 int err;
2291 int alop = vhcr->op_modifier;
2292
2293 switch (vhcr->in_modifier & 0xFF) {
2294 case RES_QP:
2295 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2296 vhcr->in_param, &vhcr->out_param);
2297 break;
2298
2299 case RES_MTT:
2300 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2301 vhcr->in_param, &vhcr->out_param);
2302 break;
2303
2304 case RES_MPT:
2305 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306 vhcr->in_param, &vhcr->out_param);
2307 break;
2308
2309 case RES_CQ:
2310 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2311 vhcr->in_param, &vhcr->out_param);
2312 break;
2313
2314 case RES_SRQ:
2315 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2316 vhcr->in_param, &vhcr->out_param);
2317 break;
2318
2319 case RES_MAC:
2320 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2321 vhcr->in_param, &vhcr->out_param,
2322 (vhcr->in_modifier >> 8) & 0xFF);
2323 break;
2324
2325 case RES_VLAN:
2326 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2327 vhcr->in_param, &vhcr->out_param,
2328 (vhcr->in_modifier >> 8) & 0xFF);
2329 break;
2330
2331 case RES_COUNTER:
2332 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2333 vhcr->in_param, &vhcr->out_param, 0);
2334 break;
2335
2336 case RES_XRCD:
2337 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2338 vhcr->in_param, &vhcr->out_param);
2339 break;
2340
2341 default:
2342 err = -EINVAL;
2343 break;
2344 }
2345
2346 return err;
2347}
2348
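/*
 * Dispatch note for mlx4_ALLOC_RES_wrapper() above: the low byte of
 * vhcr->in_modifier selects the resource type, and for port-scoped
 * resources (MAC, VLAN) the next byte carries the port; a hypothetical
 * caller would encode in_modifier = (port << 8) | RES_MAC before
 * issuing the command.
 */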
2349static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2350 u64 in_param)
2351{
2352 int err;
2353 int count;
2354 int base;
2355 int qpn;
2356
2357 switch (op) {
2358 case RES_OP_RESERVE:
2359 base = get_param_l(&in_param) & 0x7fffff;
2360 count = get_param_h(&in_param);
2361 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2362 if (err)
2363 break;
2364 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2365 __mlx4_qp_release_range(dev, base, count);
2366 break;
2367 case RES_OP_MAP_ICM:
2368 qpn = get_param_l(&in_param) & 0x7fffff;
2369 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2370 NULL, 0);
2371 if (err)
2372 return err;
2373
2374 if (!fw_reserved(dev, qpn))
2375 __mlx4_qp_free_icm(dev, qpn);
2376
2377 res_end_move(dev, slave, RES_QP, qpn);
2378
2379 if (valid_reserved(dev, slave, qpn))
2380 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2381 break;
2382 default:
2383 err = -EINVAL;
2384 break;
2385 }
2386 return err;
2387}
2388
2389static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2390 u64 in_param, u64 *out_param)
2391{
2392 int err = -EINVAL;
2393 int base;
2394 int order;
2395
2396 if (op != RES_OP_RESERVE_AND_MAP)
2397 return err;
2398
2399 base = get_param_l(&in_param);
2400 order = get_param_h(&in_param);
2401 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2402 if (!err) {
2403 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2404 __mlx4_free_mtt_range(dev, base, order);
2405 }
2406 return err;
2407}
2408
2409static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2410 u64 in_param)
2411{
2412 int err = -EINVAL;
2413 int index;
2414 int id;
2415 struct res_mpt *mpt;
2416
2417 switch (op) {
2418 case RES_OP_RESERVE:
2419 index = get_param_l(&in_param);
2420 id = index & mpt_mask(dev);
2421 err = get_res(dev, slave, id, RES_MPT, &mpt);
2422 if (err)
2423 break;
2424 index = mpt->key;
2425 put_res(dev, slave, id, RES_MPT);
2426
2427 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2428 if (err)
2429 break;
2430 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2431 __mlx4_mpt_release(dev, index);
2432 break;
2433 case RES_OP_MAP_ICM:
2434 index = get_param_l(&in_param);
2435 id = index & mpt_mask(dev);
2436 err = mr_res_start_move_to(dev, slave, id,
2437 RES_MPT_RESERVED, &mpt);
2438 if (err)
2439 return err;
2440
2441 __mlx4_mpt_free_icm(dev, mpt->key);
2442 res_end_move(dev, slave, RES_MPT, id);
2443 break;
2444 default:
2445 err = -EINVAL;
2446 break;
2447 }
2448 return err;
2449}
2450
2451static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2452 u64 in_param, u64 *out_param)
2453{
2454 int cqn;
2455 int err;
2456
2457 switch (op) {
2458 case RES_OP_RESERVE_AND_MAP:
2459 cqn = get_param_l(&in_param);
2460 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2461 if (err)
2462 break;
2463
2464 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2465 __mlx4_cq_free_icm(dev, cqn);
2466 break;
2467
2468 default:
2469 err = -EINVAL;
2470 break;
2471 }
2472
2473 return err;
2474}
2475
2476static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2477 u64 in_param, u64 *out_param)
2478{
2479 int srqn;
2480 int err;
2481
2482 switch (op) {
2483 case RES_OP_RESERVE_AND_MAP:
2484 srqn = get_param_l(&in_param);
2485 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2486 if (err)
2487 break;
2488
2489 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2490 __mlx4_srq_free_icm(dev, srqn);
2491 break;
2492
2493 default:
2494 err = -EINVAL;
2495 break;
2496 }
2497
2498 return err;
2499}
2500
2501static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2502 u64 in_param, u64 *out_param, int in_port)
2503{
2504 int port;
2505 int err = 0;
2506
2507 switch (op) {
2508 case RES_OP_RESERVE_AND_MAP:
2509 port = !in_port ? get_param_l(out_param) : in_port;
2510 port = mlx4_slave_convert_port(
2511 dev, slave, port);
2512
2513 if (port < 0)
2514 return -EINVAL;
2515 mac_del_from_slave(dev, slave, in_param, port);
2516 __mlx4_unregister_mac(dev, port, in_param);
2517 break;
2518 default:
2519 err = -EINVAL;
2520 break;
2521 }
2522
2523 return err;
2524
2525}
2526
2527 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2528 u64 in_param, u64 *out_param, int port)
2529 {
2530 struct mlx4_priv *priv = mlx4_priv(dev);
2531 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2532 int err = 0;
2533
2534 port = mlx4_slave_convert_port(
2535 dev, slave, port);
2536
2537 if (port < 0)
2538 return -EINVAL;
2539 switch (op) {
2540 case RES_OP_RESERVE_AND_MAP:
2541 if (slave_state[slave].old_vlan_api)
2542 return 0;
2543 if (!port)
2544 return -EINVAL;
2545 vlan_del_from_slave(dev, slave, in_param, port);
2546 __mlx4_unregister_vlan(dev, port, in_param);
2547 break;
2548 default:
2549 err = -EINVAL;
2550 break;
2551 }
2552
2553 return err;
2554}
2555
2556static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2557 u64 in_param, u64 *out_param)
2558{
2559 int index;
2560 int err;
2561
2562 if (op != RES_OP_RESERVE)
2563 return -EINVAL;
2564
2565 index = get_param_l(&in_param);
2566 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2567 return 0;
2568
2569 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2570 if (err)
2571 return err;
2572
2573 __mlx4_counter_free(dev, index);
2574 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2575
2576 return err;
2577}
2578
2579static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2580 u64 in_param, u64 *out_param)
2581{
2582 int xrcdn;
2583 int err;
2584
2585 if (op != RES_OP_RESERVE)
2586 return -EINVAL;
2587
2588 xrcdn = get_param_l(&in_param);
2589 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2590 if (err)
2591 return err;
2592
2593 __mlx4_xrcd_free(dev, xrcdn);
2594
2595 return err;
2596}
2597
2598int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2599 struct mlx4_vhcr *vhcr,
2600 struct mlx4_cmd_mailbox *inbox,
2601 struct mlx4_cmd_mailbox *outbox,
2602 struct mlx4_cmd_info *cmd)
2603{
2604 int err = -EINVAL;
2605 int alop = vhcr->op_modifier;
2606
2607 switch (vhcr->in_modifier & 0xFF) {
2608 case RES_QP:
2609 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2610 vhcr->in_param);
2611 break;
2612
2613 case RES_MTT:
2614 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2615 vhcr->in_param, &vhcr->out_param);
2616 break;
2617
2618 case RES_MPT:
2619 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2620 vhcr->in_param);
2621 break;
2622
2623 case RES_CQ:
2624 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2625 vhcr->in_param, &vhcr->out_param);
2626 break;
2627
2628 case RES_SRQ:
2629 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2630 vhcr->in_param, &vhcr->out_param);
2631 break;
2632
2633 case RES_MAC:
2634 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2635 vhcr->in_param, &vhcr->out_param,
2636 (vhcr->in_modifier >> 8) & 0xFF);
2637 break;
2638
2639 case RES_VLAN:
2640 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2641 vhcr->in_param, &vhcr->out_param,
2642 (vhcr->in_modifier >> 8) & 0xFF);
2643 break;
2644
2645 case RES_COUNTER:
2646 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2647 vhcr->in_param, &vhcr->out_param);
2648 break;
2649
2650 case RES_XRCD:
2651 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2652 vhcr->in_param, &vhcr->out_param);
2653 break;
2654 default:
2655 break;
2656 }
2657 return err;
2658}
2659
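/*
 * The small helpers below decode individual fields of big-endian HW
 * contexts (MPT, QP, SRQ, CQ, EQ) so the command wrappers can validate
 * a slave's mailbox contents before a command reaches firmware.
 */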
2660/* ugly but other choices are uglier */
2661static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2662{
2663 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2664}
2665
2666 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2667 {
2668 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2669}
2670
2671static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2672{
2673 return be32_to_cpu(mpt->mtt_sz);
2674}
2675
2676static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2677{
2678 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2679}
2680
2681static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2682{
2683 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2684}
2685
2686static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2687{
2688 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2689}
2690
2691static int mr_is_region(struct mlx4_mpt_entry *mpt)
2692{
2693 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2694}
2695
2696 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2697{
2698 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2699}
2700
2701 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2702{
2703 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2704}
2705
2706static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2707{
2708 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2709 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2710 int log_sq_stride = qpc->sq_size_stride & 7;
2711 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2712 int log_rq_stride = qpc->rq_size_stride & 7;
2713 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2714 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2715 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2716 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2717 int sq_size;
2718 int rq_size;
2719 int total_pages;
2720 int total_mem;
2721 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2722
2723 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2724 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2725 total_mem = sq_size + rq_size;
2726 total_pages =
2727 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2728 page_shift);
2729
2730 return total_pages;
2731}
2732
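/*
 * Worked example for qp_get_mtt_size() above: with 4KB pages
 * (log_page_size == 0, so page_shift == 12), log_sq_size == 10 and a
 * log SQ stride of 2, sq_size = 1 << (10 + 2 + 4) = 64KB; an equally
 * sized RQ brings total_mem to 128KB, so with page_offset == 0 the
 * shift gives 32 and roundup_pow_of_two(32) yields 32 MTT entries.
 */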
2733static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2734 int size, struct res_mtt *mtt)
2735{
2736 int res_start = mtt->com.res_id;
2737 int res_size = (1 << mtt->order);
2738
2739 if (start < res_start || start + size > res_start + res_size)
2740 return -EPERM;
2741 return 0;
2742}
2743
2744int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2745 struct mlx4_vhcr *vhcr,
2746 struct mlx4_cmd_mailbox *inbox,
2747 struct mlx4_cmd_mailbox *outbox,
2748 struct mlx4_cmd_info *cmd)
2749{
2750 int err;
2751 int index = vhcr->in_modifier;
2752 struct res_mtt *mtt;
2753 struct res_mpt *mpt = NULL;
2754 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2755 int phys;
2756 int id;
2757 u32 pd;
2758 int pd_slave;
2759
2760 id = index & mpt_mask(dev);
2761 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2762 if (err)
2763 return err;
2764
2765 /* Disable memory windows for VFs. */
2766 if (!mr_is_region(inbox->buf)) {
2767 err = -EPERM;
2768 goto ex_abort;
2769 }
2770
2771 /* Make sure that the PD bits related to the slave id are zeros. */
2772 pd = mr_get_pd(inbox->buf);
2773 pd_slave = (pd >> 17) & 0x7f;
2774 if (pd_slave != 0 && --pd_slave != slave) {
2775 err = -EPERM;
2776 goto ex_abort;
2777 }
2778
2779 if (mr_is_fmr(inbox->buf)) {
2780 /* FMR and Bind Enable are forbidden in slave devices. */
2781 if (mr_is_bind_enabled(inbox->buf)) {
2782 err = -EPERM;
2783 goto ex_abort;
2784 }
2785 /* FMR and Memory Windows are also forbidden. */
2786 if (!mr_is_region(inbox->buf)) {
2787 err = -EPERM;
2788 goto ex_abort;
2789 }
2790 }
2791
2792 phys = mr_phys_mpt(inbox->buf);
2793 if (!phys) {
2794 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2795 if (err)
2796 goto ex_abort;
2797
2798 err = check_mtt_range(dev, slave, mtt_base,
2799 mr_get_mtt_size(inbox->buf), mtt);
2800 if (err)
2801 goto ex_put;
2802
2803 mpt->mtt = mtt;
2804 }
2805
2806 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2807 if (err)
2808 goto ex_put;
2809
2810 if (!phys) {
2811 atomic_inc(&mtt->ref_count);
2812 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2813 }
2814
2815 res_end_move(dev, slave, RES_MPT, id);
2816 return 0;
2817
2818ex_put:
2819 if (!phys)
2820 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2821ex_abort:
2822 res_abort_move(dev, slave, RES_MPT, id);
2823
2824 return err;
2825}
2826
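/*
 * PD ownership check in mlx4_SW2HW_MPT_wrapper() above: bits 17..23 of
 * the MPT's PD field encode the owning function as (slave + 1), with 0
 * meaning "not stamped"; an MPT whose PD is stamped for a different
 * function is rejected with -EPERM before reaching firmware.
 */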
2827int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2828 struct mlx4_vhcr *vhcr,
2829 struct mlx4_cmd_mailbox *inbox,
2830 struct mlx4_cmd_mailbox *outbox,
2831 struct mlx4_cmd_info *cmd)
2832{
2833 int err;
2834 int index = vhcr->in_modifier;
2835 struct res_mpt *mpt;
2836 int id;
2837
2838 id = index & mpt_mask(dev);
2839 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2840 if (err)
2841 return err;
2842
2843 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2844 if (err)
2845 goto ex_abort;
2846
2847 if (mpt->mtt)
2848 atomic_dec(&mpt->mtt->ref_count);
2849
2850 res_end_move(dev, slave, RES_MPT, id);
2851 return 0;
2852
2853ex_abort:
2854 res_abort_move(dev, slave, RES_MPT, id);
2855
2856 return err;
2857}
2858
2859int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2860 struct mlx4_vhcr *vhcr,
2861 struct mlx4_cmd_mailbox *inbox,
2862 struct mlx4_cmd_mailbox *outbox,
2863 struct mlx4_cmd_info *cmd)
2864{
2865 int err;
2866 int index = vhcr->in_modifier;
2867 struct res_mpt *mpt;
2868 int id;
2869
2870 id = index & mpt_mask(dev);
2871 err = get_res(dev, slave, id, RES_MPT, &mpt);
2872 if (err)
2873 return err;
2874
2875 if (mpt->com.from_state == RES_MPT_MAPPED) {
2876 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2877 * that, the VF must read the MPT. But since the MPT entry memory is not
2878 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2879 * entry contents. To guarantee that the MPT cannot be changed, the driver
2880 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2881 * ownership following the change. The change here allows the VF to
2882 * perform QUERY_MPT also when the entry is in SW ownership.
2883 */
2884 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2885 &mlx4_priv(dev)->mr_table.dmpt_table,
2886 mpt->key, NULL);
2887
2888 if (NULL == mpt_entry || NULL == outbox->buf) {
2889 err = -EINVAL;
2890 goto out;
2891 }
2892
2893 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2894
2895 err = 0;
2896 } else if (mpt->com.from_state == RES_MPT_HW) {
2897 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2898 } else {
2899 err = -EBUSY;
2900 goto out;
2901 }
2902
2903
2904out:
2905 put_res(dev, slave, id, RES_MPT);
2906 return err;
2907}
2908
2909static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2910{
2911 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2912}
2913
2914static int qp_get_scqn(struct mlx4_qp_context *qpc)
2915{
2916 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2917}
2918
2919static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2920{
2921 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2922}
2923
2924static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2925 struct mlx4_qp_context *context)
2926{
2927 u32 qpn = vhcr->in_modifier & 0xffffff;
2928 u32 qkey = 0;
2929
2930 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2931 return;
2932
2933 /* adjust qkey in qp context */
2934 context->qkey = cpu_to_be32(qkey);
2935}
2936
2937static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2938 struct mlx4_qp_context *qpc,
2939 struct mlx4_cmd_mailbox *inbox);
2940
2941int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2942 struct mlx4_vhcr *vhcr,
2943 struct mlx4_cmd_mailbox *inbox,
2944 struct mlx4_cmd_mailbox *outbox,
2945 struct mlx4_cmd_info *cmd)
2946{
2947 int err;
2948 int qpn = vhcr->in_modifier & 0x7fffff;
2949 struct res_mtt *mtt;
2950 struct res_qp *qp;
2951 struct mlx4_qp_context *qpc = inbox->buf + 8;
2952 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2953 int mtt_size = qp_get_mtt_size(qpc);
2954 struct res_cq *rcq;
2955 struct res_cq *scq;
2956 int rcqn = qp_get_rcqn(qpc);
2957 int scqn = qp_get_scqn(qpc);
2958 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2959 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2960 struct res_srq *srq;
2961 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2962
2963 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2964 if (err)
2965 return err;
2966
2967 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2968 if (err)
2969 return err;
2970 qp->local_qpn = local_qpn;
2971 qp->sched_queue = 0;
2972 qp->param3 = 0;
2973 qp->vlan_control = 0;
2974 qp->fvl_rx = 0;
2975 qp->pri_path_fl = 0;
2976 qp->vlan_index = 0;
2977 qp->feup = 0;
2978 qp->qpc_flags = be32_to_cpu(qpc->flags);
2979
2980 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2981 if (err)
2982 goto ex_abort;
2983
2984 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2985 if (err)
2986 goto ex_put_mtt;
2987
2988 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2989 if (err)
2990 goto ex_put_mtt;
2991
2992 if (scqn != rcqn) {
2993 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2994 if (err)
2995 goto ex_put_rcq;
2996 } else
2997 scq = rcq;
2998
2999 if (use_srq) {
3000 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3001 if (err)
3002 goto ex_put_scq;
3003 }
3004
3005 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3006 update_pkey_index(dev, slave, inbox);
3007 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3008 if (err)
3009 goto ex_put_srq;
3010 atomic_inc(&mtt->ref_count);
3011 qp->mtt = mtt;
3012 atomic_inc(&rcq->ref_count);
3013 qp->rcq = rcq;
3014 atomic_inc(&scq->ref_count);
3015 qp->scq = scq;
3016
3017 if (scqn != rcqn)
3018 put_res(dev, slave, scqn, RES_CQ);
3019
3020 if (use_srq) {
3021 atomic_inc(&srq->ref_count);
3022 put_res(dev, slave, srqn, RES_SRQ);
3023 qp->srq = srq;
3024 }
3025
3026 /* Save param3 for dynamic changes from VST back to VGT */
3027 qp->param3 = qpc->param3;
3028 put_res(dev, slave, rcqn, RES_CQ);
3029 put_res(dev, slave, mtt_base, RES_MTT);
3030 res_end_move(dev, slave, RES_QP, qpn);
3031
3032 return 0;
3033
3034ex_put_srq:
3035 if (use_srq)
3036 put_res(dev, slave, srqn, RES_SRQ);
3037ex_put_scq:
3038 if (scqn != rcqn)
3039 put_res(dev, slave, scqn, RES_CQ);
3040ex_put_rcq:
3041 put_res(dev, slave, rcqn, RES_CQ);
3042ex_put_mtt:
3043 put_res(dev, slave, mtt_base, RES_MTT);
3044ex_abort:
3045 res_abort_move(dev, slave, RES_QP, qpn);
3046
3047 return err;
3048}
3049
3050 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3051{
3052 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3053}
3054
3055static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3056{
3057 int log_eq_size = eqc->log_eq_size & 0x1f;
3058 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3059
3060 if (log_eq_size + 5 < page_shift)
3061 return 1;
3062
3063 return 1 << (log_eq_size + 5 - page_shift);
3064}
3065
3066 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3067{
3068 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3069}
3070
3071static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3072{
3073 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3074 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3075
3076 if (log_cq_size + 5 < page_shift)
3077 return 1;
3078
3079 return 1 << (log_cq_size + 5 - page_shift);
3080}
3081
3082int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3083 struct mlx4_vhcr *vhcr,
3084 struct mlx4_cmd_mailbox *inbox,
3085 struct mlx4_cmd_mailbox *outbox,
3086 struct mlx4_cmd_info *cmd)
3087{
3088 int err;
3089 int eqn = vhcr->in_modifier;
3090 int res_id = (slave << 10) | eqn;
3091 struct mlx4_eq_context *eqc = inbox->buf;
3092 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3093 int mtt_size = eq_get_mtt_size(eqc);
3094 struct res_eq *eq;
3095 struct res_mtt *mtt;
3096
3097 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3098 if (err)
3099 return err;
3100 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3101 if (err)
3102 goto out_add;
3103
3104 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3105 if (err)
3106 goto out_move;
3107
3108 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3109 if (err)
3110 goto out_put;
3111
3112 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3113 if (err)
3114 goto out_put;
3115
3116 atomic_inc(&mtt->ref_count);
3117 eq->mtt = mtt;
3118 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3119 res_end_move(dev, slave, RES_EQ, res_id);
3120 return 0;
3121
3122out_put:
3123 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3124out_move:
3125 res_abort_move(dev, slave, RES_EQ, res_id);
3126out_add:
3127 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3128 return err;
3129}
3130
3131int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3132 struct mlx4_vhcr *vhcr,
3133 struct mlx4_cmd_mailbox *inbox,
3134 struct mlx4_cmd_mailbox *outbox,
3135 struct mlx4_cmd_info *cmd)
3136{
3137 int err;
3138 u8 get = vhcr->op_modifier;
3139
3140 if (get != 1)
3141 return -EPERM;
3142
3143 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3144
3145 return err;
3146}
3147
3148static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3149 int len, struct res_mtt **res)
3150{
3151 struct mlx4_priv *priv = mlx4_priv(dev);
3152 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3153 struct res_mtt *mtt;
3154 int err = -EINVAL;
3155
3156 spin_lock_irq(mlx4_tlock(dev));
3157 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3158 com.list) {
3159 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3160 *res = mtt;
3161 mtt->com.from_state = mtt->com.state;
3162 mtt->com.state = RES_MTT_BUSY;
3163 err = 0;
3164 break;
3165 }
3166 }
3167 spin_unlock_irq(mlx4_tlock(dev));
3168
3169 return err;
3170}
3171
3172 static int verify_qp_parameters(struct mlx4_dev *dev,
3173 struct mlx4_vhcr *vhcr,
3174 struct mlx4_cmd_mailbox *inbox,
3175 enum qp_transition transition, u8 slave)
3176{
3177 u32 qp_type;
3178 u32 qpn;
3179 struct mlx4_qp_context *qp_ctx;
3180 enum mlx4_qp_optpar optpar;
3181 int port;
3182 int num_gids;
3183
3184 qp_ctx = inbox->buf + 8;
3185 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3186 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3187
3188 if (slave != mlx4_master_func_num(dev)) {
3189 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
3190 /* setting QP rate-limit is disallowed for VFs */
3191 if (qp_ctx->rate_limit_params)
3192 return -EPERM;
3193 }
3194
3195 switch (qp_type) {
3196 case MLX4_QP_ST_RC:
3197 case MLX4_QP_ST_XRC:
3198 case MLX4_QP_ST_UC:
3199 switch (transition) {
3200 case QP_TRANS_INIT2RTR:
3201 case QP_TRANS_RTR2RTS:
3202 case QP_TRANS_RTS2RTS:
3203 case QP_TRANS_SQD2SQD:
3204 case QP_TRANS_SQD2RTS:
3205 if (slave != mlx4_master_func_num(dev)) {
3206 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3207 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3208 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3209 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3210 else
3211 num_gids = 1;
3212 if (qp_ctx->pri_path.mgid_index >= num_gids)
3213 return -EINVAL;
3214 }
3215 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3216 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3217 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3218 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3219 else
3220 num_gids = 1;
3221 if (qp_ctx->alt_path.mgid_index >= num_gids)
3222 return -EINVAL;
3223 }
3224 }
3225 break;
3226 default:
3227 break;
3228 }
3229 break;
3230
3231 case MLX4_QP_ST_MLX:
3232 qpn = vhcr->in_modifier & 0x7fffff;
3233 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3234 if (transition == QP_TRANS_INIT2RTR &&
3235 slave != mlx4_master_func_num(dev) &&
3236 mlx4_is_qp_reserved(dev, qpn) &&
3237 !mlx4_vf_smi_enabled(dev, slave, port)) {
3238 /* only enabled VFs may create MLX proxy QPs */
3239 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3240 __func__, slave, port);
3241 return -EPERM;
3242 }
3243 break;
3244
3245 default:
3246 break;
3247 }
3248
3249 return 0;
3250}
3251
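/*
 * verify_qp_parameters() above gates VF-initiated QP transitions: it
 * clears the FPP bit and forbids rate-limit settings for non-master
 * functions, bounds-checks mgid_index against the slave's GID
 * allotment on both the primary and alternate paths, and blocks
 * unprivileged VFs from bringing up MLX proxy special QPs.
 */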
3252int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3253 struct mlx4_vhcr *vhcr,
3254 struct mlx4_cmd_mailbox *inbox,
3255 struct mlx4_cmd_mailbox *outbox,
3256 struct mlx4_cmd_info *cmd)
3257{
3258 struct mlx4_mtt mtt;
3259 __be64 *page_list = inbox->buf;
3260 u64 *pg_list = (u64 *)page_list;
3261 int i;
3262 struct res_mtt *rmtt = NULL;
3263 int start = be64_to_cpu(page_list[0]);
3264 int npages = vhcr->in_modifier;
3265 int err;
3266
3267 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3268 if (err)
3269 return err;
3270
3271 /* Call the SW implementation of write_mtt:
3272 * - Prepare a dummy mtt struct
3273 * - Translate inbox contents to simple addresses in host endianness */
3274 mtt.offset = 0; /* TBD this is broken but I don't handle it since
3275 we don't really use it */
3276 mtt.order = 0;
3277 mtt.page_shift = 0;
3278 for (i = 0; i < npages; ++i)
3279 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3280
3281 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3282 ((u64 *)page_list + 2));
3283
3284 if (rmtt)
3285 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3286
3287 return err;
3288}
3289
3290int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3291 struct mlx4_vhcr *vhcr,
3292 struct mlx4_cmd_mailbox *inbox,
3293 struct mlx4_cmd_mailbox *outbox,
3294 struct mlx4_cmd_info *cmd)
3295{
3296 int eqn = vhcr->in_modifier;
3297 int res_id = eqn | (slave << 10);
3298 struct res_eq *eq;
3299 int err;
3300
3301 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3302 if (err)
3303 return err;
3304
3305 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3306 if (err)
3307 goto ex_abort;
3308
3309 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3310 if (err)
3311 goto ex_put;
3312
3313 atomic_dec(&eq->mtt->ref_count);
3314 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3315 res_end_move(dev, slave, RES_EQ, res_id);
3316 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3317
3318 return 0;
3319
3320ex_put:
3321 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3322ex_abort:
3323 res_abort_move(dev, slave, RES_EQ, res_id);
3324
3325 return err;
3326}
3327
3328int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3329{
3330 struct mlx4_priv *priv = mlx4_priv(dev);
3331 struct mlx4_slave_event_eq_info *event_eq;
3332 struct mlx4_cmd_mailbox *mailbox;
3333 u32 in_modifier = 0;
3334 int err;
3335 int res_id;
3336 struct res_eq *req;
3337
3338 if (!priv->mfunc.master.slave_state)
3339 return -EINVAL;
3340
3341 /* check for slave valid, slave not PF, and slave active */
3342 if (slave < 0 || slave > dev->persist->num_vfs ||
3343 slave == dev->caps.function ||
3344 !priv->mfunc.master.slave_state[slave].active)
3345 return 0;
3346
3347 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3348
3349 /* Create the event only if the slave is registered */
3350 if (event_eq->eqn < 0)
3351 return 0;
3352
3353 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3354 res_id = (slave << 10) | event_eq->eqn;
3355 err = get_res(dev, slave, res_id, RES_EQ, &req);
3356 if (err)
3357 goto unlock;
3358
3359 if (req->com.from_state != RES_EQ_HW) {
3360 err = -EINVAL;
3361 goto put;
3362 }
3363
3364 mailbox = mlx4_alloc_cmd_mailbox(dev);
3365 if (IS_ERR(mailbox)) {
3366 err = PTR_ERR(mailbox);
3367 goto put;
3368 }
3369
3370 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3371 ++event_eq->token;
3372 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3373 }
3374
3375 memcpy(mailbox->buf, (u8 *) eqe, 28);
3376
3377 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3378
3379 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3380 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3381 MLX4_CMD_NATIVE);
3382
3383 put_res(dev, slave, res_id, RES_EQ);
3384 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3385 mlx4_free_cmd_mailbox(dev, mailbox);
3386 return err;
3387
3388put:
3389 put_res(dev, slave, res_id, RES_EQ);
3390
3391unlock:
3392 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3393 return err;
3394}
3395
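/*
 * Slave EQs share one tracker namespace: res_id is (slave << 10) | eqn,
 * giving each function a private 10-bit EQ range that matches the 0x3ff
 * mask applied when mlx4_GEN_EQE() above builds its in_modifier.
 */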
3396int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3397 struct mlx4_vhcr *vhcr,
3398 struct mlx4_cmd_mailbox *inbox,
3399 struct mlx4_cmd_mailbox *outbox,
3400 struct mlx4_cmd_info *cmd)
3401{
3402 int eqn = vhcr->in_modifier;
3403 int res_id = eqn | (slave << 10);
3404 struct res_eq *eq;
3405 int err;
3406
3407 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3408 if (err)
3409 return err;
3410
3411 if (eq->com.from_state != RES_EQ_HW) {
3412 err = -EINVAL;
3413 goto ex_put;
3414 }
3415
3416 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3417
3418ex_put:
3419 put_res(dev, slave, res_id, RES_EQ);
3420 return err;
3421}
3422
3423int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3424 struct mlx4_vhcr *vhcr,
3425 struct mlx4_cmd_mailbox *inbox,
3426 struct mlx4_cmd_mailbox *outbox,
3427 struct mlx4_cmd_info *cmd)
3428{
3429 int err;
3430 int cqn = vhcr->in_modifier;
3431 struct mlx4_cq_context *cqc = inbox->buf;
3432 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3433 struct res_cq *cq = NULL;
3434 struct res_mtt *mtt;
3435
3436 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3437 if (err)
3438 return err;
3439 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3440 if (err)
3441 goto out_move;
3442 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3443 if (err)
3444 goto out_put;
3445 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3446 if (err)
3447 goto out_put;
3448 atomic_inc(&mtt->ref_count);
3449 cq->mtt = mtt;
3450 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3451 res_end_move(dev, slave, RES_CQ, cqn);
3452 return 0;
3453
3454out_put:
3455 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3456out_move:
3457 res_abort_move(dev, slave, RES_CQ, cqn);
3458 return err;
3459}
3460
3461int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3462 struct mlx4_vhcr *vhcr,
3463 struct mlx4_cmd_mailbox *inbox,
3464 struct mlx4_cmd_mailbox *outbox,
3465 struct mlx4_cmd_info *cmd)
3466{
3467 int err;
3468 int cqn = vhcr->in_modifier;
3469 struct res_cq *cq = NULL;
3470
3471 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3472 if (err)
3473 return err;
3474 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3475 if (err)
3476 goto out_move;
3477 atomic_dec(&cq->mtt->ref_count);
3478 res_end_move(dev, slave, RES_CQ, cqn);
3479 return 0;
3480
3481out_move:
3482 res_abort_move(dev, slave, RES_CQ, cqn);
3483 return err;
3484}
3485
3486int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3487 struct mlx4_vhcr *vhcr,
3488 struct mlx4_cmd_mailbox *inbox,
3489 struct mlx4_cmd_mailbox *outbox,
3490 struct mlx4_cmd_info *cmd)
3491{
3492 int cqn = vhcr->in_modifier;
3493 struct res_cq *cq;
3494 int err;
3495
3496 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3497 if (err)
3498 return err;
3499
3500 if (cq->com.from_state != RES_CQ_HW)
3501 goto ex_put;
3502
3503 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3504ex_put:
3505 put_res(dev, slave, cqn, RES_CQ);
3506
3507 return err;
3508}
3509
3510static int handle_resize(struct mlx4_dev *dev, int slave,
3511 struct mlx4_vhcr *vhcr,
3512 struct mlx4_cmd_mailbox *inbox,
3513 struct mlx4_cmd_mailbox *outbox,
3514 struct mlx4_cmd_info *cmd,
3515 struct res_cq *cq)
3516{
3517 int err;
3518 struct res_mtt *orig_mtt;
3519 struct res_mtt *mtt;
3520 struct mlx4_cq_context *cqc = inbox->buf;
3521 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3522
3523 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3524 if (err)
3525 return err;
3526
3527 if (orig_mtt != cq->mtt) {
3528 err = -EINVAL;
3529 goto ex_put;
3530 }
3531
3532 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3533 if (err)
3534 goto ex_put;
3535
3536 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3537 if (err)
3538 goto ex_put1;
3539 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3540 if (err)
3541 goto ex_put1;
3542 atomic_dec(&orig_mtt->ref_count);
3543 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3544 atomic_inc(&mtt->ref_count);
3545 cq->mtt = mtt;
3546 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3547 return 0;
3548
3549ex_put1:
3550 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3551ex_put:
3552 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3553
3554 return err;
3555
3556}
3557
3558int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3559 struct mlx4_vhcr *vhcr,
3560 struct mlx4_cmd_mailbox *inbox,
3561 struct mlx4_cmd_mailbox *outbox,
3562 struct mlx4_cmd_info *cmd)
3563{
3564 int cqn = vhcr->in_modifier;
3565 struct res_cq *cq;
3566 int err;
3567
3568 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3569 if (err)
3570 return err;
3571
3572 if (cq->com.from_state != RES_CQ_HW)
3573 goto ex_put;
3574
3575 if (vhcr->op_modifier == 0) {
3576 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3577 goto ex_put;
3578 }
3579
3580 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3581ex_put:
3582 put_res(dev, slave, cqn, RES_CQ);
3583
3584 return err;
3585}
3586
3587static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3588{
3589 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3590 int log_rq_stride = srqc->logstride & 7;
3591 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3592
3593 if (log_srq_size + log_rq_stride + 4 < page_shift)
3594 return 1;
3595
3596 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3597}
3598
3599int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3600 struct mlx4_vhcr *vhcr,
3601 struct mlx4_cmd_mailbox *inbox,
3602 struct mlx4_cmd_mailbox *outbox,
3603 struct mlx4_cmd_info *cmd)
3604{
3605 int err;
3606 int srqn = vhcr->in_modifier;
3607 struct res_mtt *mtt;
3608 struct res_srq *srq = NULL;
3609 struct mlx4_srq_context *srqc = inbox->buf;
3610 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3611
3612 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3613 return -EINVAL;
3614
3615 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3616 if (err)
3617 return err;
3618 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3619 if (err)
3620 goto ex_abort;
3621 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3622 mtt);
3623 if (err)
3624 goto ex_put_mtt;
3625
3626 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3627 if (err)
3628 goto ex_put_mtt;
3629
3630 atomic_inc(&mtt->ref_count);
3631 srq->mtt = mtt;
3632 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3633 res_end_move(dev, slave, RES_SRQ, srqn);
3634 return 0;
3635
3636ex_put_mtt:
3637 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3638ex_abort:
3639 res_abort_move(dev, slave, RES_SRQ, srqn);
3640
3641 return err;
3642}
3643
3644int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3645 struct mlx4_vhcr *vhcr,
3646 struct mlx4_cmd_mailbox *inbox,
3647 struct mlx4_cmd_mailbox *outbox,
3648 struct mlx4_cmd_info *cmd)
3649{
3650 int err;
3651 int srqn = vhcr->in_modifier;
3652 struct res_srq *srq = NULL;
3653
3654 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3655 if (err)
3656 return err;
3657 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3658 if (err)
3659 goto ex_abort;
3660 atomic_dec(&srq->mtt->ref_count);
3661 if (srq->cq)
3662 atomic_dec(&srq->cq->ref_count);
3663 res_end_move(dev, slave, RES_SRQ, srqn);
3664
3665 return 0;
3666
3667ex_abort:
3668 res_abort_move(dev, slave, RES_SRQ, srqn);
3669
3670 return err;
3671}
3672
3673int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3674 struct mlx4_vhcr *vhcr,
3675 struct mlx4_cmd_mailbox *inbox,
3676 struct mlx4_cmd_mailbox *outbox,
3677 struct mlx4_cmd_info *cmd)
3678{
3679 int err;
3680 int srqn = vhcr->in_modifier;
3681 struct res_srq *srq;
3682
3683 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3684 if (err)
3685 return err;
3686 if (srq->com.from_state != RES_SRQ_HW) {
3687 err = -EBUSY;
3688 goto out;
3689 }
3690 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3691out:
3692 put_res(dev, slave, srqn, RES_SRQ);
3693 return err;
3694}
3695
3696int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3697 struct mlx4_vhcr *vhcr,
3698 struct mlx4_cmd_mailbox *inbox,
3699 struct mlx4_cmd_mailbox *outbox,
3700 struct mlx4_cmd_info *cmd)
3701{
3702 int err;
3703 int srqn = vhcr->in_modifier;
3704 struct res_srq *srq;
3705
3706 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3707 if (err)
3708 return err;
3709
3710 if (srq->com.from_state != RES_SRQ_HW) {
3711 err = -EBUSY;
3712 goto out;
3713 }
3714
3715 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3716out:
3717 put_res(dev, slave, srqn, RES_SRQ);
3718 return err;
3719}
3720
3721int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3722 struct mlx4_vhcr *vhcr,
3723 struct mlx4_cmd_mailbox *inbox,
3724 struct mlx4_cmd_mailbox *outbox,
3725 struct mlx4_cmd_info *cmd)
3726{
3727 int err;
3728 int qpn = vhcr->in_modifier & 0x7fffff;
3729 struct res_qp *qp;
3730
3731 err = get_res(dev, slave, qpn, RES_QP, &qp);
3732 if (err)
3733 return err;
3734 if (qp->com.from_state != RES_QP_HW) {
3735 err = -EBUSY;
3736 goto out;
3737 }
3738
3739 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3740out:
3741 put_res(dev, slave, qpn, RES_QP);
3742 return err;
3743}
3744
3745int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3746 struct mlx4_vhcr *vhcr,
3747 struct mlx4_cmd_mailbox *inbox,
3748 struct mlx4_cmd_mailbox *outbox,
3749 struct mlx4_cmd_info *cmd)
3750{
3751 struct mlx4_qp_context *context = inbox->buf + 8;
3752 adjust_proxy_tun_qkey(dev, vhcr, context);
3753 update_pkey_index(dev, slave, inbox);
3754 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3755}
3756
3757static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3758 struct mlx4_qp_context *qpc,
3759 struct mlx4_cmd_mailbox *inbox)
3760{
3761 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3762 u8 pri_sched_queue;
3763 int port = mlx4_slave_convert_port(
3764 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3765
3766 if (port < 0)
3767 return -EINVAL;
3768
3769 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3770 ((port & 1) << 6);
3771
3772 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3773 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3774 qpc->pri_path.sched_queue = pri_sched_queue;
3775 }
3776
3777 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3778 port = mlx4_slave_convert_port(
3779 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3780 + 1) - 1;
3781 if (port < 0)
3782 return -EINVAL;
3783 qpc->alt_path.sched_queue =
3784 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3785 (port & 1) << 6;
3786 }
3787 return 0;
3788}
3789
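/*
 * Bit 6 of pri_path.sched_queue selects the physical port.
 * adjust_qp_sched_queue() above rewrites that bit through
 * mlx4_slave_convert_port() for both the primary and alternate paths,
 * so a VF with a remapped port transmits on the port it actually owns.
 */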
3790static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3791 struct mlx4_qp_context *qpc,
3792 struct mlx4_cmd_mailbox *inbox)
3793{
3794 u64 mac;
3795 int port;
3796 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3797 u8 sched = *(u8 *)(inbox->buf + 64);
3798 u8 smac_ix;
3799
3800 port = (sched >> 6 & 1) + 1;
3801 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3802 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3803 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3804 return -ENOENT;
3805 }
3806 return 0;
3807}
3808
3809int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3810 struct mlx4_vhcr *vhcr,
3811 struct mlx4_cmd_mailbox *inbox,
3812 struct mlx4_cmd_mailbox *outbox,
3813 struct mlx4_cmd_info *cmd)
3814{
3815 int err;
3816 struct mlx4_qp_context *qpc = inbox->buf + 8;
3817 int qpn = vhcr->in_modifier & 0x7fffff;
3818 struct res_qp *qp;
3819 u8 orig_sched_queue;
3820 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3821 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3822 u8 orig_pri_path_fl = qpc->pri_path.fl;
3823 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3824 u8 orig_feup = qpc->pri_path.feup;
3825
3826 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3827 if (err)
3828 return err;
3829 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3830 if (err)
3831 return err;
3832
3833 if (roce_verify_mac(dev, slave, qpc, inbox))
3834 return -EINVAL;
3835
3836 update_pkey_index(dev, slave, inbox);
3837 update_gid(dev, inbox, (u8)slave);
3838 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3839 orig_sched_queue = qpc->pri_path.sched_queue;
3840
3841 err = get_res(dev, slave, qpn, RES_QP, &qp);
3842 if (err)
3843 return err;
3844 if (qp->com.from_state != RES_QP_HW) {
3845 err = -EBUSY;
3846 goto out;
3847 }
3848
3849 err = update_vport_qp_param(dev, inbox, slave, qpn);
3850 if (err)
3851 goto out;
3852
3853 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3854out:
3855 /* if no error, save sched queue value passed in by VF. This is
3856 * essentially the QOS value provided by the VF. This will be useful
3857 * if we allow dynamic changes from VST back to VGT
3858 */
3859 if (!err) {
3860 qp->sched_queue = orig_sched_queue;
3861 qp->vlan_control = orig_vlan_control;
3862 qp->fvl_rx = orig_fvl_rx;
3863 qp->pri_path_fl = orig_pri_path_fl;
3864 qp->vlan_index = orig_vlan_index;
3865 qp->feup = orig_feup;
3866 }
3867 put_res(dev, slave, qpn, RES_QP);
3868 return err;
3869}
3870
3871int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3872 struct mlx4_vhcr *vhcr,
3873 struct mlx4_cmd_mailbox *inbox,
3874 struct mlx4_cmd_mailbox *outbox,
3875 struct mlx4_cmd_info *cmd)
3876{
3877 int err;
3878 struct mlx4_qp_context *context = inbox->buf + 8;
3879
3880 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3881 if (err)
3882 return err;
3883 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3884 if (err)
3885 return err;
3886
3887 update_pkey_index(dev, slave, inbox);
3888 update_gid(dev, inbox, (u8)slave);
3889 adjust_proxy_tun_qkey(dev, vhcr, context);
3890 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3891}
3892
3893int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3894 struct mlx4_vhcr *vhcr,
3895 struct mlx4_cmd_mailbox *inbox,
3896 struct mlx4_cmd_mailbox *outbox,
3897 struct mlx4_cmd_info *cmd)
3898{
3899 int err;
3900 struct mlx4_qp_context *context = inbox->buf + 8;
3901
3902 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3903 if (err)
3904 return err;
3905 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3906 if (err)
3907 return err;
3908
3909 update_pkey_index(dev, slave, inbox);
3910 update_gid(dev, inbox, (u8)slave);
3911 adjust_proxy_tun_qkey(dev, vhcr, context);
3912 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3913}
3914
3915
3916int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3917 struct mlx4_vhcr *vhcr,
3918 struct mlx4_cmd_mailbox *inbox,
3919 struct mlx4_cmd_mailbox *outbox,
3920 struct mlx4_cmd_info *cmd)
3921{
3922 struct mlx4_qp_context *context = inbox->buf + 8;
3923 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3924 if (err)
3925 return err;
3926 adjust_proxy_tun_qkey(dev, vhcr, context);
3927 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3928}
3929
3930int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3931 struct mlx4_vhcr *vhcr,
3932 struct mlx4_cmd_mailbox *inbox,
3933 struct mlx4_cmd_mailbox *outbox,
3934 struct mlx4_cmd_info *cmd)
3935{
3936 int err;
3937 struct mlx4_qp_context *context = inbox->buf + 8;
3938
449fc488
MB
3939 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3940 if (err)
3941 return err;
99ec41d0 3942 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
54679e14
JM
3943 if (err)
3944 return err;
3945
3946 adjust_proxy_tun_qkey(dev, vhcr, context);
3947 update_gid(dev, inbox, (u8)slave);
3948 update_pkey_index(dev, slave, inbox);
3949 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3950}
3951
3952int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3953 struct mlx4_vhcr *vhcr,
3954 struct mlx4_cmd_mailbox *inbox,
3955 struct mlx4_cmd_mailbox *outbox,
3956 struct mlx4_cmd_info *cmd)
3957{
3958 int err;
3959 struct mlx4_qp_context *context = inbox->buf + 8;
3960
449fc488
MB
3961 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3962 if (err)
3963 return err;
99ec41d0 3964 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
54679e14
JM
3965 if (err)
3966 return err;
c82e9aa0 3967
54679e14
JM
3968 adjust_proxy_tun_qkey(dev, vhcr, context);
3969 update_gid(dev, inbox, (u8)slave);
3970 update_pkey_index(dev, slave, inbox);
c82e9aa0
EC
3971 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3972}
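/*
 * The RTR2RTS, RTS2RTS, SQERR2RTS, SQD2SQD and SQD2RTS wrappers above all
 * follow one pattern: remap sched_queue for the slave's actual port, verify
 * the transition-specific parameters where applicable, rewrite the pkey
 * index and GID with the slave's identity, fix up the qkey for proxy/tunnel
 * QPs, and only then forward the command through mlx4_GEN_QP_wrapper().
 */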
3973
3974int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3975 struct mlx4_vhcr *vhcr,
3976 struct mlx4_cmd_mailbox *inbox,
3977 struct mlx4_cmd_mailbox *outbox,
3978 struct mlx4_cmd_info *cmd)
3979{
3980 int err;
3981 int qpn = vhcr->in_modifier & 0x7fffff;
3982 struct res_qp *qp;
3983
3984 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3985 if (err)
3986 return err;
3987 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3988 if (err)
3989 goto ex_abort;
3990
3991 atomic_dec(&qp->mtt->ref_count);
3992 atomic_dec(&qp->rcq->ref_count);
3993 atomic_dec(&qp->scq->ref_count);
3994 if (qp->srq)
3995 atomic_dec(&qp->srq->ref_count);
3996 res_end_move(dev, slave, RES_QP, qpn);
3997 return 0;
3998
3999ex_abort:
4000 res_abort_move(dev, slave, RES_QP, qpn);
4001
4002 return err;
4003}
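/*
 * Moving a QP back to RESET drops the references taken on its MTT, CQs and
 * (if present) SRQ when it entered hardware ownership; res_end_move() then
 * commits the transition back to RES_QP_MAPPED, making those resources
 * removable again.
 */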
4004
4005static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4006 struct res_qp *rqp, u8 *gid)
4007{
4008 struct res_gid *res;
4009
4010 list_for_each_entry(res, &rqp->mcg_list, list) {
4011 if (!memcmp(res->gid, gid, 16))
4012 return res;
4013 }
4014 return NULL;
4015}
4016
4017static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 4018 u8 *gid, enum mlx4_protocol prot,
fab1e24a 4019 enum mlx4_steer_type steer, u64 reg_id)
c82e9aa0
EC
4020{
4021 struct res_gid *res;
4022 int err;
4023
31975e27 4024 res = kzalloc(sizeof(*res), GFP_KERNEL);
c82e9aa0
EC
4025 if (!res)
4026 return -ENOMEM;
4027
4028 spin_lock_irq(&rqp->mcg_spl);
4029 if (find_gid(dev, slave, rqp, gid)) {
4030 kfree(res);
4031 err = -EEXIST;
4032 } else {
4033 memcpy(res->gid, gid, 16);
4034 res->prot = prot;
9f5b6c63 4035 res->steer = steer;
fab1e24a 4036 res->reg_id = reg_id;
c82e9aa0
EC
4037 list_add_tail(&res->list, &rqp->mcg_list);
4038 err = 0;
4039 }
4040 spin_unlock_irq(&rqp->mcg_spl);
4041
4042 return err;
4043}
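/*
 * add_mcg_res() allocates outside the mcg spinlock, then performs the
 * duplicate-GID check and the list insertion atomically under it, so a
 * concurrent rem_mcg_res() or detach_qp() cannot race the attach
 * bookkeeping.
 */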
4044
4045static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
9f5b6c63 4046 u8 *gid, enum mlx4_protocol prot,
fab1e24a 4047 enum mlx4_steer_type steer, u64 *reg_id)
c82e9aa0
EC
4048{
4049 struct res_gid *res;
4050 int err;
4051
4052 spin_lock_irq(&rqp->mcg_spl);
4053 res = find_gid(dev, slave, rqp, gid);
9f5b6c63 4054 if (!res || res->prot != prot || res->steer != steer)
c82e9aa0
EC
4055 err = -EINVAL;
4056 else {
fab1e24a 4057 *reg_id = res->reg_id;
c82e9aa0
EC
4058 list_del(&res->list);
4059 kfree(res);
4060 err = 0;
4061 }
4062 spin_unlock_irq(&rqp->mcg_spl);
4063
4064 return err;
4065}
4066
449fc488
MB
4067static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4068 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
fab1e24a
HHZ
4069 enum mlx4_steer_type type, u64 *reg_id)
4070{
4071 switch (dev->caps.steering_mode) {
449fc488
MB
4072 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4073 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4074 if (port < 0)
4075 return port;
4076 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
fab1e24a
HHZ
4077 block_loopback, prot,
4078 reg_id);
449fc488 4079 }
fab1e24a 4080 case MLX4_STEERING_MODE_B0:
449fc488
MB
4081 if (prot == MLX4_PROT_ETH) {
4082 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4083 if (port < 0)
4084 return port;
4085 gid[5] = port;
4086 }
fab1e24a
HHZ
4087 return mlx4_qp_attach_common(dev, qp, gid,
4088 block_loopback, prot, type);
4089 default:
4090 return -EINVAL;
4091 }
4092}
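/*
 * In both steering modes the slave's view of the port (carried in gid[5])
 * must be translated to a physical port before attaching: device-managed
 * steering converts the rule via mlx4_trans_to_dmfs_attach(), while B0
 * steering rewrites gid[5] in place for Ethernet and reuses the common
 * attach path.
 */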
4093
449fc488
MB
4094static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4095 u8 gid[16], enum mlx4_protocol prot,
4096 enum mlx4_steer_type type, u64 reg_id)
fab1e24a
HHZ
4097{
4098 switch (dev->caps.steering_mode) {
4099 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4100 return mlx4_flow_detach(dev, reg_id);
4101 case MLX4_STEERING_MODE_B0:
4102 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4103 default:
4104 return -EINVAL;
4105 }
4106}
4107
531d9014
JM
4108static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4109 u8 *gid, enum mlx4_protocol prot)
4110{
4111 int real_port;
4112
4113 if (prot != MLX4_PROT_ETH)
4114 return 0;
4115
4116 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4117 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4118 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4119 if (real_port < 0)
4120 return -EINVAL;
4121 gid[5] = real_port;
4122 }
4123
4124 return 0;
4125}
4126
c82e9aa0
EC
4127int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4128 struct mlx4_vhcr *vhcr,
4129 struct mlx4_cmd_mailbox *inbox,
4130 struct mlx4_cmd_mailbox *outbox,
4131 struct mlx4_cmd_info *cmd)
4132{
4133 struct mlx4_qp qp; /* dummy for calling attach/detach */
4134 u8 *gid = inbox->buf;
4135 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
162344ed 4136 int err;
c82e9aa0
EC
4137 int qpn;
4138 struct res_qp *rqp;
fab1e24a 4139 u64 reg_id = 0;
c82e9aa0
EC
4140 int attach = vhcr->op_modifier;
4141 int block_loopback = vhcr->in_modifier >> 31;
4142 u8 steer_type_mask = 2;
75c6062c 4143 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
c82e9aa0
EC
4144
4145 qpn = vhcr->in_modifier & 0xffffff;
4146 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4147 if (err)
4148 return err;
4149
4150 qp.qpn = qpn;
4151 if (attach) {
449fc488 4152 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
fab1e24a
HHZ
4153 type, &reg_id);
4154 if (err) {
 4155 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
c82e9aa0 4156 goto ex_put;
fab1e24a
HHZ
4157 }
4158 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
c82e9aa0 4159 if (err)
fab1e24a 4160 goto ex_detach;
c82e9aa0 4161 } else {
531d9014
JM
4162 err = mlx4_adjust_port(dev, slave, gid, prot);
4163 if (err)
4164 goto ex_put;
4165
fab1e24a 4166 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
c82e9aa0
EC
4167 if (err)
4168 goto ex_put;
c82e9aa0 4169
fab1e24a
HHZ
4170 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4171 if (err)
 4172 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4173 qpn, reg_id);
4174 }
c82e9aa0 4175 put_res(dev, slave, qpn, RES_QP);
fab1e24a 4176 return err;
c82e9aa0 4177
fab1e24a
HHZ
4178ex_detach:
4179 qp_detach(dev, &qp, gid, prot, type, reg_id);
c82e9aa0
EC
4180ex_put:
4181 put_res(dev, slave, qpn, RES_QP);
c82e9aa0
EC
4182 return err;
4183}
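/*
 * Command layout as decoded above: op_modifier selects attach (nonzero) or
 * detach, in_modifier carries the QPN in its low 24 bits, the protocol in
 * bits 28-30 and the block-loopback flag in bit 31, and bit 1 of gid[7]
 * encodes the steering type.
 */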
4184
7fb40f87
HHZ
4185/*
4186 * MAC validation for Flow Steering rules.
4187 * VF can attach rules only with a mac address which is assigned to it.
4188 */
4189static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4190 struct list_head *rlist)
4191{
4192 struct mac_res *res, *tmp;
4193 __be64 be_mac;
4194
 4195 /* make sure it isn't a multicast or broadcast MAC */
4196 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4197 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4198 list_for_each_entry_safe(res, tmp, rlist, list) {
4199 be_mac = cpu_to_be64(res->mac << 16);
c0623e58 4200 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
7fb40f87
HHZ
4201 return 0;
4202 }
 4203 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4204 eth_header->eth.dst_mac, slave);
4205 return -EINVAL;
4206 }
4207 return 0;
4208}
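/*
 * The tracked MAC is a 48-bit value in the low bytes of a u64; shifting it
 * left by 16 and converting with cpu_to_be64() places the six address bytes
 * at the start of be_mac, so ether_addr_equal() can compare it directly
 * against the 6-byte dst_mac of the rule header.
 */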
4209
4210/*
4211 * In case of missing eth header, append eth header with a MAC address
4212 * assigned to the VF.
4213 */
4214static int add_eth_header(struct mlx4_dev *dev, int slave,
4215 struct mlx4_cmd_mailbox *inbox,
4216 struct list_head *rlist, int header_id)
4217{
4218 struct mac_res *res, *tmp;
4219 u8 port;
4220 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4221 struct mlx4_net_trans_rule_hw_eth *eth_header;
4222 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4223 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4224 __be64 be_mac = 0;
4225 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4226
4227 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
015465f8 4228 port = ctrl->port;
7fb40f87
HHZ
4229 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4230
 4231 /* Make room in the inbox for the eth header */
4232 switch (header_id) {
4233 case MLX4_NET_TRANS_RULE_ID_IPV4:
4234 ip_header =
4235 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4236 memmove(ip_header, eth_header,
4237 sizeof(*ip_header) + sizeof(*l4_header));
4238 break;
4239 case MLX4_NET_TRANS_RULE_ID_TCP:
4240 case MLX4_NET_TRANS_RULE_ID_UDP:
4241 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4242 (eth_header + 1);
4243 memmove(l4_header, eth_header, sizeof(*l4_header));
4244 break;
4245 default:
4246 return -EINVAL;
4247 }
4248 list_for_each_entry_safe(res, tmp, rlist, list) {
4249 if (port == res->port) {
4250 be_mac = cpu_to_be64(res->mac << 16);
4251 break;
4252 }
4253 }
4254 if (!be_mac) {
1a91de28 4255 pr_err("Failed adding eth header to FS rule; can't find matching MAC for port %d\n",
7fb40f87
HHZ
4256 port);
4257 return -EINVAL;
4258 }
4259
4260 memset(eth_header, 0, sizeof(*eth_header));
4261 eth_header->size = sizeof(*eth_header) >> 2;
4262 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4263 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4264 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4265
4266 return 0;
4267
4268}
4269
9a892835
MG
4270#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4271 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4272 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
ce8d9e0d
MB
4273int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4274 struct mlx4_vhcr *vhcr,
4275 struct mlx4_cmd_mailbox *inbox,
4276 struct mlx4_cmd_mailbox *outbox,
4277 struct mlx4_cmd_info *cmd_info)
4278{
4279 int err;
4280 u32 qpn = vhcr->in_modifier & 0xffffff;
4281 struct res_qp *rqp;
4282 u64 mac;
4283 unsigned port;
4284 u64 pri_addr_path_mask;
4285 struct mlx4_update_qp_context *cmd;
4286 int smac_index;
4287
4288 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4289
4290 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4291 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4292 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4293 return -EPERM;
4294
9a892835
MG
4295 if ((pri_addr_path_mask &
4296 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4297 !(dev->caps.flags2 &
4298 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
5d4de16c
CJ
4299 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4300 slave);
423b3aec 4301 return -EOPNOTSUPP;
9a892835
MG
4302 }
4303
ce8d9e0d
MB
4304 /* Just change the smac for the QP */
4305 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4306 if (err) {
4307 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4308 return err;
4309 }
4310
4311 port = (rqp->sched_queue >> 6 & 1) + 1;
b7834758
MB
4312
4313 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4314 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4315 err = mac_find_smac_ix_in_slave(dev, slave, port,
4316 smac_index, &mac);
4317
4318 if (err) {
4319 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4320 qpn, smac_index);
4321 goto err_mac;
4322 }
ce8d9e0d
MB
4323 }
4324
4325 err = mlx4_cmd(dev, inbox->dma,
4326 vhcr->in_modifier, 0,
4327 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4328 MLX4_CMD_NATIVE);
4329 if (err) {
4330 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4331 goto err_mac;
4332 }
4333
4334err_mac:
4335 put_res(dev, slave, qpn, RES_QP);
4336 return err;
4337}
4338
78efed27
MS
4339static u32 qp_attach_mbox_size(void *mbox)
4340{
4341 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4342 struct _rule_hw *rule_header;
4343
4344 rule_header = (struct _rule_hw *)(mbox + size);
4345
4346 while (rule_header->size) {
4347 size += rule_header->size * sizeof(u32);
4348 rule_header += 1;
4349 }
4350 return size;
4351}
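/*
 * qp_attach_mbox_size() recovers the length of a variable-sized attach
 * mailbox: after the fixed control segment, each rule header reports its
 * own size in 32-bit words, and a zero-sized header terminates the list.
 */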
4352
4353static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4354
8fcfb4db
HHZ
4355int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4356 struct mlx4_vhcr *vhcr,
4357 struct mlx4_cmd_mailbox *inbox,
4358 struct mlx4_cmd_mailbox *outbox,
4359 struct mlx4_cmd_info *cmd)
4360{
7fb40f87
HHZ
4361
4362 struct mlx4_priv *priv = mlx4_priv(dev);
4363 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4364 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
1b9c6b06 4365 int err;
a9c01e7a 4366 int qpn;
2c473ae7 4367 struct res_qp *rqp;
7fb40f87
HHZ
4368 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4369 struct _rule_hw *rule_header;
4370 int header_id;
78efed27
MS
4371 struct res_fs_rule *rrule;
4372 u32 mbox_size;
1b9c6b06 4373
0ff1fb65
HHZ
4374 if (dev->caps.steering_mode !=
4375 MLX4_STEERING_MODE_DEVICE_MANAGED)
4376 return -EOPNOTSUPP;
1b9c6b06 4377
7fb40f87 4378 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
2b2b31c8
AH
4379 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4380 if (err <= 0)
449fc488 4381 return -EINVAL;
2b2b31c8 4382 ctrl->port = err;
a9c01e7a 4383 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
2c473ae7 4384 err = get_res(dev, slave, qpn, RES_QP, &rqp);
a9c01e7a 4385 if (err) {
1a91de28 4386 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
a9c01e7a
HHZ
4387 return err;
4388 }
7fb40f87
HHZ
4389 rule_header = (struct _rule_hw *)(ctrl + 1);
4390 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4391
48564135 4392 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
10b1c04e 4393 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
48564135 4394
7fb40f87
HHZ
4395 switch (header_id) {
4396 case MLX4_NET_TRANS_RULE_ID_ETH:
a9c01e7a
HHZ
4397 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4398 err = -EINVAL;
78efed27 4399 goto err_put_qp;
a9c01e7a 4400 }
7fb40f87 4401 break;
60396683
JM
4402 case MLX4_NET_TRANS_RULE_ID_IB:
4403 break;
7fb40f87
HHZ
4404 case MLX4_NET_TRANS_RULE_ID_IPV4:
4405 case MLX4_NET_TRANS_RULE_ID_TCP:
4406 case MLX4_NET_TRANS_RULE_ID_UDP:
1a91de28 4407 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
a9c01e7a
HHZ
4408 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4409 err = -EINVAL;
78efed27 4410 goto err_put_qp;
a9c01e7a 4411 }
7fb40f87
HHZ
4412 vhcr->in_modifier +=
4413 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4414 break;
4415 default:
1a91de28 4416 pr_err("Corrupted mailbox\n");
a9c01e7a 4417 err = -EINVAL;
78efed27 4418 goto err_put_qp;
7fb40f87
HHZ
4419 }
4420
1b9c6b06
HHZ
4421 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4422 vhcr->in_modifier, 0,
4423 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4424 MLX4_CMD_NATIVE);
4425 if (err)
78efed27
MS
4426 goto err_put_qp;
4427
1b9c6b06 4428
2c473ae7 4429 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
1b9c6b06 4430 if (err) {
1a91de28 4431 mlx4_err(dev, "Failed to add flow steering resources\n");
78efed27
MS
4432 goto err_detach;
4433 }
4434
4435 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4436 if (err)
4437 goto err_detach;
4438
4439 mbox_size = qp_attach_mbox_size(inbox->buf);
4440 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4441 if (!rrule->mirr_mbox) {
4442 err = -ENOMEM;
4443 goto err_put_rule;
4444 }
4445 rrule->mirr_mbox_size = mbox_size;
4446 rrule->mirr_rule_id = 0;
4447 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4448
 4449 /* point the mirror copy at the other port */
4450 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4451 if (ctrl->port == 1)
4452 ctrl->port = 2;
4453 else
4454 ctrl->port = 1;
4455
4456 if (mlx4_is_bonded(dev))
4457 mlx4_do_mirror_rule(dev, rrule);
4458
4459 atomic_inc(&rqp->ref_count);
4460
4461err_put_rule:
4462 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4463err_detach:
4464 /* detach rule on error */
4465 if (err)
1b9c6b06 4466 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2065b38b 4467 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1b9c6b06 4468 MLX4_CMD_NATIVE);
78efed27 4469err_put_qp:
a9c01e7a 4470 put_res(dev, slave, qpn, RES_QP);
1b9c6b06 4471 return err;
8fcfb4db
HHZ
4472}
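/*
 * On a successful attach the wrapper above also prepares for port bonding:
 * it stashes a copy of the attach mailbox with the port field flipped, so
 * the rule can be mirrored to the other port right away (when the device is
 * already bonded) or later via mlx4_bond_fs_rules().
 */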
4473
78efed27
MS
4474static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4475{
4476 int err;
4477
4478 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4479 if (err) {
 4480 mlx4_err(dev, "Failed to remove flow steering resources\n");
4481 return err;
4482 }
4483
4484 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4485 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4486 return 0;
4487}
4488
8fcfb4db
HHZ
4489int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4490 struct mlx4_vhcr *vhcr,
4491 struct mlx4_cmd_mailbox *inbox,
4492 struct mlx4_cmd_mailbox *outbox,
4493 struct mlx4_cmd_info *cmd)
4494{
1b9c6b06 4495 int err;
2c473ae7
HHZ
4496 struct res_qp *rqp;
4497 struct res_fs_rule *rrule;
78efed27 4498 u64 mirr_reg_id;
3b01fe7f 4499 int qpn;
1b9c6b06 4500
0ff1fb65
HHZ
4501 if (dev->caps.steering_mode !=
4502 MLX4_STEERING_MODE_DEVICE_MANAGED)
4503 return -EOPNOTSUPP;
1b9c6b06 4504
2c473ae7
HHZ
4505 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4506 if (err)
4507 return err;
78efed27
MS
4508
4509 if (!rrule->mirr_mbox) {
4510 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4511 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4512 return -EINVAL;
4513 }
4514 mirr_reg_id = rrule->mirr_rule_id;
4515 kfree(rrule->mirr_mbox);
3b01fe7f 4516 qpn = rrule->qpn;
78efed27 4517
2c473ae7
HHZ
 4518 /* Release the rule from busy state before removal */
4519 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3b01fe7f 4520 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2c473ae7
HHZ
4521 if (err)
4522 return err;
4523
78efed27
MS
4524 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4525 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4526 if (err) {
 4527 mlx4_err(dev, "Failed to get mirror rule resource\n");
4528 } else {
4529 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4530 mlx4_undo_mirror_rule(dev, rrule);
4531 }
4532 }
1b9c6b06
HHZ
4533 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4534 if (err) {
1a91de28 4535 mlx4_err(dev, "Failed to remove flow steering resources\n");
2c473ae7 4536 goto out;
1b9c6b06
HHZ
4537 }
4538
4539 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4540 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4541 MLX4_CMD_NATIVE);
2c473ae7
HHZ
4542 if (!err)
4543 atomic_dec(&rqp->ref_count);
4544out:
3b01fe7f 4545 put_res(dev, slave, qpn, RES_QP);
1b9c6b06 4546 return err;
8fcfb4db
HHZ
4547}
4548
c82e9aa0
EC
4549enum {
4550 BUSY_MAX_RETRIES = 10
4551};
4552
4553int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4554 struct mlx4_vhcr *vhcr,
4555 struct mlx4_cmd_mailbox *inbox,
4556 struct mlx4_cmd_mailbox *outbox,
4557 struct mlx4_cmd_info *cmd)
4558{
4559 int err;
4560 int index = vhcr->in_modifier & 0xffff;
4561
4562 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4563 if (err)
4564 return err;
4565
4566 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4567 put_res(dev, slave, index, RES_COUNTER);
4568 return err;
4569}
4570
4571static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4572{
4573 struct res_gid *rgid;
4574 struct res_gid *tmp;
c82e9aa0
EC
4575 struct mlx4_qp qp; /* dummy for calling attach/detach */
4576
4577 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
fab1e24a
HHZ
4578 switch (dev->caps.steering_mode) {
4579 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4580 mlx4_flow_detach(dev, rgid->reg_id);
4581 break;
4582 case MLX4_STEERING_MODE_B0:
4583 qp.qpn = rqp->local_qpn;
4584 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4585 rgid->prot, rgid->steer);
4586 break;
4587 }
c82e9aa0
EC
4588 list_del(&rgid->list);
4589 kfree(rgid);
4590 }
4591}
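/*
 * detach_qp() is the cleanup counterpart of mlx4_QP_ATTACH_wrapper(): every
 * GID still attached to the QP is released through the mode-appropriate
 * path (flow detach by reg_id for device-managed steering, common detach
 * for B0) before its tracking entry is freed.
 */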
4592
4593static int _move_all_busy(struct mlx4_dev *dev, int slave,
4594 enum mlx4_resource type, int print)
4595{
4596 struct mlx4_priv *priv = mlx4_priv(dev);
4597 struct mlx4_resource_tracker *tracker =
4598 &priv->mfunc.master.res_tracker;
4599 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4600 struct res_common *r;
4601 struct res_common *tmp;
4602 int busy;
4603
4604 busy = 0;
4605 spin_lock_irq(mlx4_tlock(dev));
4606 list_for_each_entry_safe(r, tmp, rlist, list) {
4607 if (r->owner == slave) {
4608 if (!r->removing) {
4609 if (r->state == RES_ANY_BUSY) {
4610 if (print)
4611 mlx4_dbg(dev,
aa1ec3dd 4612 "%s id 0x%llx is busy\n",
95646373 4613 resource_str(type),
c82e9aa0
EC
4614 r->res_id);
4615 ++busy;
4616 } else {
4617 r->from_state = r->state;
4618 r->state = RES_ANY_BUSY;
4619 r->removing = 1;
4620 }
4621 }
4622 }
4623 }
4624 spin_unlock_irq(mlx4_tlock(dev));
4625
4626 return busy;
4627}
4628
4629static int move_all_busy(struct mlx4_dev *dev, int slave,
4630 enum mlx4_resource type)
4631{
4632 unsigned long begin;
4633 int busy;
4634
4635 begin = jiffies;
4636 do {
4637 busy = _move_all_busy(dev, slave, type, 0);
4638 if (time_after(jiffies, begin + 5 * HZ))
4639 break;
4640 if (busy)
4641 cond_resched();
4642 } while (busy);
4643
4644 if (busy)
4645 busy = _move_all_busy(dev, slave, type, 1);
4646
4647 return busy;
4648}
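/*
 * move_all_busy() keeps rescanning until every resource of the given type
 * owned by the slave is claimed for removal, giving up after about five
 * seconds; only the final pass (print == 1) reports which resources are
 * still busy.
 */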
4649static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4650{
4651 struct mlx4_priv *priv = mlx4_priv(dev);
4652 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4653 struct list_head *qp_list =
4654 &tracker->slave_list[slave].res_list[RES_QP];
4655 struct res_qp *qp;
4656 struct res_qp *tmp;
4657 int state;
4658 u64 in_param;
4659 int qpn;
4660 int err;
4661
4662 err = move_all_busy(dev, slave, RES_QP);
4663 if (err)
1a91de28
JP
4664 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4665 slave);
c82e9aa0
EC
4666
4667 spin_lock_irq(mlx4_tlock(dev));
4668 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4669 spin_unlock_irq(mlx4_tlock(dev));
4670 if (qp->com.owner == slave) {
4671 qpn = qp->com.res_id;
4672 detach_qp(dev, slave, qp);
4673 state = qp->com.from_state;
4674 while (state != 0) {
4675 switch (state) {
4676 case RES_QP_RESERVED:
4677 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4678 rb_erase(&qp->com.node,
4679 &tracker->res_tree[RES_QP]);
c82e9aa0
EC
4680 list_del(&qp->com.list);
4681 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4682 if (!valid_reserved(dev, slave, qpn)) {
4683 __mlx4_qp_release_range(dev, qpn, 1);
4684 mlx4_release_resource(dev, slave,
4685 RES_QP, 1, 0);
4686 }
c82e9aa0
EC
4687 kfree(qp);
4688 state = 0;
4689 break;
4690 case RES_QP_MAPPED:
4691 if (!valid_reserved(dev, slave, qpn))
4692 __mlx4_qp_free_icm(dev, qpn);
4693 state = RES_QP_RESERVED;
4694 break;
4695 case RES_QP_HW:
4696 in_param = slave;
4697 err = mlx4_cmd(dev, in_param,
4698 qp->local_qpn, 2,
4699 MLX4_CMD_2RST_QP,
4700 MLX4_CMD_TIME_CLASS_A,
4701 MLX4_CMD_NATIVE);
4702 if (err)
1a91de28
JP
4703 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4704 slave, qp->local_qpn);
c82e9aa0
EC
4705 atomic_dec(&qp->rcq->ref_count);
4706 atomic_dec(&qp->scq->ref_count);
4707 atomic_dec(&qp->mtt->ref_count);
4708 if (qp->srq)
4709 atomic_dec(&qp->srq->ref_count);
4710 state = RES_QP_MAPPED;
4711 break;
4712 default:
4713 state = 0;
4714 }
4715 }
4716 }
4717 spin_lock_irq(mlx4_tlock(dev));
4718 }
4719 spin_unlock_irq(mlx4_tlock(dev));
4720}
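/*
 * rem_slave_qps(), like the other rem_slave_* helpers below, unwinds each
 * resource through its state machine one step at a time (RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed) and drops the tracker lock
 * while processing an entry, so firmware commands are never issued under
 * the spinlock.
 */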
4721
4722static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4723{
4724 struct mlx4_priv *priv = mlx4_priv(dev);
4725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4726 struct list_head *srq_list =
4727 &tracker->slave_list[slave].res_list[RES_SRQ];
4728 struct res_srq *srq;
4729 struct res_srq *tmp;
4730 int state;
4731 u64 in_param;
4732 LIST_HEAD(tlist);
4733 int srqn;
4734 int err;
4735
4736 err = move_all_busy(dev, slave, RES_SRQ);
4737 if (err)
1a91de28
JP
4738 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4739 slave);
c82e9aa0
EC
4740
4741 spin_lock_irq(mlx4_tlock(dev));
4742 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4743 spin_unlock_irq(mlx4_tlock(dev));
4744 if (srq->com.owner == slave) {
4745 srqn = srq->com.res_id;
4746 state = srq->com.from_state;
4747 while (state != 0) {
4748 switch (state) {
4749 case RES_SRQ_ALLOCATED:
4750 __mlx4_srq_free_icm(dev, srqn);
4751 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4752 rb_erase(&srq->com.node,
4753 &tracker->res_tree[RES_SRQ]);
c82e9aa0
EC
4754 list_del(&srq->com.list);
4755 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4756 mlx4_release_resource(dev, slave,
4757 RES_SRQ, 1, 0);
c82e9aa0
EC
4758 kfree(srq);
4759 state = 0;
4760 break;
4761
4762 case RES_SRQ_HW:
4763 in_param = slave;
4764 err = mlx4_cmd(dev, in_param, srqn, 1,
4765 MLX4_CMD_HW2SW_SRQ,
4766 MLX4_CMD_TIME_CLASS_A,
4767 MLX4_CMD_NATIVE);
4768 if (err)
1a91de28 4769 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
c82e9aa0
EC
4770 slave, srqn);
4771
4772 atomic_dec(&srq->mtt->ref_count);
4773 if (srq->cq)
4774 atomic_dec(&srq->cq->ref_count);
4775 state = RES_SRQ_ALLOCATED;
4776 break;
4777
4778 default:
4779 state = 0;
4780 }
4781 }
4782 }
4783 spin_lock_irq(mlx4_tlock(dev));
4784 }
4785 spin_unlock_irq(mlx4_tlock(dev));
4786}
4787
4788static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4789{
4790 struct mlx4_priv *priv = mlx4_priv(dev);
4791 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4792 struct list_head *cq_list =
4793 &tracker->slave_list[slave].res_list[RES_CQ];
4794 struct res_cq *cq;
4795 struct res_cq *tmp;
4796 int state;
4797 u64 in_param;
4798 LIST_HEAD(tlist);
4799 int cqn;
4800 int err;
4801
4802 err = move_all_busy(dev, slave, RES_CQ);
4803 if (err)
1a91de28
JP
4804 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4805 slave);
c82e9aa0
EC
4806
4807 spin_lock_irq(mlx4_tlock(dev));
4808 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4809 spin_unlock_irq(mlx4_tlock(dev));
4810 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4811 cqn = cq->com.res_id;
4812 state = cq->com.from_state;
4813 while (state != 0) {
4814 switch (state) {
4815 case RES_CQ_ALLOCATED:
4816 __mlx4_cq_free_icm(dev, cqn);
4817 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4818 rb_erase(&cq->com.node,
4819 &tracker->res_tree[RES_CQ]);
c82e9aa0
EC
4820 list_del(&cq->com.list);
4821 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4822 mlx4_release_resource(dev, slave,
4823 RES_CQ, 1, 0);
c82e9aa0
EC
4824 kfree(cq);
4825 state = 0;
4826 break;
4827
4828 case RES_CQ_HW:
4829 in_param = slave;
4830 err = mlx4_cmd(dev, in_param, cqn, 1,
4831 MLX4_CMD_HW2SW_CQ,
4832 MLX4_CMD_TIME_CLASS_A,
4833 MLX4_CMD_NATIVE);
4834 if (err)
1a91de28 4835 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
c82e9aa0
EC
4836 slave, cqn);
4837 atomic_dec(&cq->mtt->ref_count);
4838 state = RES_CQ_ALLOCATED;
4839 break;
4840
4841 default:
4842 state = 0;
4843 }
4844 }
4845 }
4846 spin_lock_irq(mlx4_tlock(dev));
4847 }
4848 spin_unlock_irq(mlx4_tlock(dev));
4849}
4850
4851static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4852{
4853 struct mlx4_priv *priv = mlx4_priv(dev);
4854 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4855 struct list_head *mpt_list =
4856 &tracker->slave_list[slave].res_list[RES_MPT];
4857 struct res_mpt *mpt;
4858 struct res_mpt *tmp;
4859 int state;
4860 u64 in_param;
4861 LIST_HEAD(tlist);
4862 int mptn;
4863 int err;
4864
4865 err = move_all_busy(dev, slave, RES_MPT);
4866 if (err)
1a91de28
JP
4867 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4868 slave);
c82e9aa0
EC
4869
4870 spin_lock_irq(mlx4_tlock(dev));
4871 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4872 spin_unlock_irq(mlx4_tlock(dev));
4873 if (mpt->com.owner == slave) {
4874 mptn = mpt->com.res_id;
4875 state = mpt->com.from_state;
4876 while (state != 0) {
4877 switch (state) {
4878 case RES_MPT_RESERVED:
b20e519a 4879 __mlx4_mpt_release(dev, mpt->key);
c82e9aa0 4880 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4881 rb_erase(&mpt->com.node,
4882 &tracker->res_tree[RES_MPT]);
c82e9aa0
EC
4883 list_del(&mpt->com.list);
4884 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4885 mlx4_release_resource(dev, slave,
4886 RES_MPT, 1, 0);
c82e9aa0
EC
4887 kfree(mpt);
4888 state = 0;
4889 break;
4890
4891 case RES_MPT_MAPPED:
b20e519a 4892 __mlx4_mpt_free_icm(dev, mpt->key);
c82e9aa0
EC
4893 state = RES_MPT_RESERVED;
4894 break;
4895
4896 case RES_MPT_HW:
4897 in_param = slave;
4898 err = mlx4_cmd(dev, in_param, mptn, 0,
4899 MLX4_CMD_HW2SW_MPT,
4900 MLX4_CMD_TIME_CLASS_A,
4901 MLX4_CMD_NATIVE);
4902 if (err)
1a91de28 4903 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
c82e9aa0
EC
4904 slave, mptn);
4905 if (mpt->mtt)
4906 atomic_dec(&mpt->mtt->ref_count);
4907 state = RES_MPT_MAPPED;
4908 break;
4909 default:
4910 state = 0;
4911 }
4912 }
4913 }
4914 spin_lock_irq(mlx4_tlock(dev));
4915 }
4916 spin_unlock_irq(mlx4_tlock(dev));
4917}
4918
4919static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4920{
4921 struct mlx4_priv *priv = mlx4_priv(dev);
4922 struct mlx4_resource_tracker *tracker =
4923 &priv->mfunc.master.res_tracker;
4924 struct list_head *mtt_list =
4925 &tracker->slave_list[slave].res_list[RES_MTT];
4926 struct res_mtt *mtt;
4927 struct res_mtt *tmp;
4928 int state;
4929 LIST_HEAD(tlist);
4930 int base;
4931 int err;
4932
4933 err = move_all_busy(dev, slave, RES_MTT);
4934 if (err)
1a91de28
JP
4935 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4936 slave);
c82e9aa0
EC
4937
4938 spin_lock_irq(mlx4_tlock(dev));
4939 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4940 spin_unlock_irq(mlx4_tlock(dev));
4941 if (mtt->com.owner == slave) {
4942 base = mtt->com.res_id;
4943 state = mtt->com.from_state;
4944 while (state != 0) {
4945 switch (state) {
4946 case RES_MTT_ALLOCATED:
4947 __mlx4_free_mtt_range(dev, base,
4948 mtt->order);
4949 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
4950 rb_erase(&mtt->com.node,
4951 &tracker->res_tree[RES_MTT]);
c82e9aa0
EC
4952 list_del(&mtt->com.list);
4953 spin_unlock_irq(mlx4_tlock(dev));
146f3ef4
JM
4954 mlx4_release_resource(dev, slave, RES_MTT,
4955 1 << mtt->order, 0);
c82e9aa0
EC
4956 kfree(mtt);
4957 state = 0;
4958 break;
4959
4960 default:
4961 state = 0;
4962 }
4963 }
4964 }
4965 spin_lock_irq(mlx4_tlock(dev));
4966 }
4967 spin_unlock_irq(mlx4_tlock(dev));
4968}
4969
78efed27
MS
4970static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4971{
4972 struct mlx4_cmd_mailbox *mailbox;
4973 int err;
4974 struct res_fs_rule *mirr_rule;
4975 u64 reg_id;
4976
4977 mailbox = mlx4_alloc_cmd_mailbox(dev);
4978 if (IS_ERR(mailbox))
4979 return PTR_ERR(mailbox);
4980
 4981 if (!fs_rule->mirr_mbox) {
 4982 mlx4_err(dev, "rule mirroring mailbox is null\n");
 mlx4_free_cmd_mailbox(dev, mailbox); /* fix: free the mailbox on this error path too */
 4983 return -EINVAL;
 4984 }
4985 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4986 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4987 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4988 MLX4_CMD_NATIVE);
4989 mlx4_free_cmd_mailbox(dev, mailbox);
4990
4991 if (err)
4992 goto err;
4993
4994 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4995 if (err)
4996 goto err_detach;
4997
4998 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4999 if (err)
5000 goto err_rem;
5001
5002 fs_rule->mirr_rule_id = reg_id;
5003 mirr_rule->mirr_rule_id = 0;
5004 mirr_rule->mirr_mbox_size = 0;
5005 mirr_rule->mirr_mbox = NULL;
5006 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5007
5008 return 0;
5009err_rem:
5010 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5011err_detach:
5012 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5013 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5014err:
5015 return err;
5016}
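/*
 * A mirror rule is tracked under the same owner but carries a NULL
 * mirr_mbox and a zero mirr_mbox_size; that marking is also what makes the
 * DETACH wrapper above refuse to remove mirror rules directly.
 */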
5017
5018static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5019{
5020 struct mlx4_priv *priv = mlx4_priv(dev);
5021 struct mlx4_resource_tracker *tracker =
5022 &priv->mfunc.master.res_tracker;
5023 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5024 struct rb_node *p;
5025 struct res_fs_rule *fs_rule;
5026 int err = 0;
5027 LIST_HEAD(mirr_list);
5028
5029 for (p = rb_first(root); p; p = rb_next(p)) {
5030 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5031 if ((bond && fs_rule->mirr_mbox_size) ||
5032 (!bond && !fs_rule->mirr_mbox_size))
5033 list_add_tail(&fs_rule->mirr_list, &mirr_list);
5034 }
5035
5036 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5037 if (bond)
5038 err += mlx4_do_mirror_rule(dev, fs_rule);
5039 else
5040 err += mlx4_undo_mirror_rule(dev, fs_rule);
5041 }
5042 return err;
5043}
5044
5045int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5046{
5047 return mlx4_mirror_fs_rules(dev, true);
5048}
5049
5050int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5051{
5052 return mlx4_mirror_fs_rules(dev, false);
5053}
5054
1b9c6b06
HHZ
5055static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5056{
5057 struct mlx4_priv *priv = mlx4_priv(dev);
5058 struct mlx4_resource_tracker *tracker =
5059 &priv->mfunc.master.res_tracker;
5060 struct list_head *fs_rule_list =
5061 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5062 struct res_fs_rule *fs_rule;
5063 struct res_fs_rule *tmp;
5064 int state;
5065 u64 base;
5066 int err;
5067
5068 err = move_all_busy(dev, slave, RES_FS_RULE);
5069 if (err)
 5070 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5071 slave);
5072
5073 spin_lock_irq(mlx4_tlock(dev));
5074 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5075 spin_unlock_irq(mlx4_tlock(dev));
5076 if (fs_rule->com.owner == slave) {
5077 base = fs_rule->com.res_id;
5078 state = fs_rule->com.from_state;
5079 while (state != 0) {
5080 switch (state) {
5081 case RES_FS_RULE_ALLOCATED:
5082 /* detach rule */
5083 err = mlx4_cmd(dev, base, 0, 0,
5084 MLX4_QP_FLOW_STEERING_DETACH,
5085 MLX4_CMD_TIME_CLASS_A,
5086 MLX4_CMD_NATIVE);
5087
5088 spin_lock_irq(mlx4_tlock(dev));
5089 rb_erase(&fs_rule->com.node,
5090 &tracker->res_tree[RES_FS_RULE]);
5091 list_del(&fs_rule->com.list);
5092 spin_unlock_irq(mlx4_tlock(dev));
461d5f1b 5093 kfree(fs_rule->mirr_mbox);
1b9c6b06
HHZ
5094 kfree(fs_rule);
5095 state = 0;
5096 break;
5097
5098 default:
5099 state = 0;
5100 }
5101 }
5102 }
5103 spin_lock_irq(mlx4_tlock(dev));
5104 }
5105 spin_unlock_irq(mlx4_tlock(dev));
5106}
5107
c82e9aa0
EC
5108static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5109{
5110 struct mlx4_priv *priv = mlx4_priv(dev);
5111 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5112 struct list_head *eq_list =
5113 &tracker->slave_list[slave].res_list[RES_EQ];
5114 struct res_eq *eq;
5115 struct res_eq *tmp;
5116 int err;
5117 int state;
5118 LIST_HEAD(tlist);
5119 int eqn;
c82e9aa0
EC
5120
5121 err = move_all_busy(dev, slave, RES_EQ);
5122 if (err)
1a91de28
JP
5123 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5124 slave);
c82e9aa0
EC
5125
5126 spin_lock_irq(mlx4_tlock(dev));
5127 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5128 spin_unlock_irq(mlx4_tlock(dev));
5129 if (eq->com.owner == slave) {
5130 eqn = eq->com.res_id;
5131 state = eq->com.from_state;
5132 while (state != 0) {
5133 switch (state) {
5134 case RES_EQ_RESERVED:
5135 spin_lock_irq(mlx4_tlock(dev));
4af1c048
HHZ
5136 rb_erase(&eq->com.node,
5137 &tracker->res_tree[RES_EQ]);
c82e9aa0
EC
5138 list_del(&eq->com.list);
5139 spin_unlock_irq(mlx4_tlock(dev));
5140 kfree(eq);
5141 state = 0;
5142 break;
5143
5144 case RES_EQ_HW:
2d3c7397 5145 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
30a5da5b
JM
5146 1, MLX4_CMD_HW2SW_EQ,
5147 MLX4_CMD_TIME_CLASS_A,
5148 MLX4_CMD_NATIVE);
eb71d0d6 5149 if (err)
1a91de28 5150 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
2d3c7397 5151 slave, eqn & 0x3ff);
eb71d0d6
JM
5152 atomic_dec(&eq->mtt->ref_count);
5153 state = RES_EQ_RESERVED;
c82e9aa0
EC
5154 break;
5155
5156 default:
5157 state = 0;
5158 }
5159 }
5160 }
5161 spin_lock_irq(mlx4_tlock(dev));
5162 }
5163 spin_unlock_irq(mlx4_tlock(dev));
5164}
5165
ba062d52
JM
5166static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5167{
5168 struct mlx4_priv *priv = mlx4_priv(dev);
5169 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5170 struct list_head *counter_list =
5171 &tracker->slave_list[slave].res_list[RES_COUNTER];
5172 struct res_counter *counter;
5173 struct res_counter *tmp;
5174 int err;
f5adbfee
EBE
5175 int *counters_arr = NULL;
5176 int i, j;
ba062d52
JM
5177
5178 err = move_all_busy(dev, slave, RES_COUNTER);
5179 if (err)
1a91de28
JP
5180 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5181 slave);
ba062d52 5182
f5adbfee
EBE
5183 counters_arr = kmalloc_array(dev->caps.max_counters,
5184 sizeof(*counters_arr), GFP_KERNEL);
5185 if (!counters_arr)
5186 return;
5187
5188 do {
5189 i = 0;
5190 j = 0;
5191 spin_lock_irq(mlx4_tlock(dev));
5192 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5193 if (counter->com.owner == slave) {
5194 counters_arr[i++] = counter->com.res_id;
5195 rb_erase(&counter->com.node,
5196 &tracker->res_tree[RES_COUNTER]);
5197 list_del(&counter->com.list);
5198 kfree(counter);
5199 }
5200 }
5201 spin_unlock_irq(mlx4_tlock(dev));
5202
5203 while (j < i) {
5204 __mlx4_counter_free(dev, counters_arr[j++]);
146f3ef4 5205 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
ba062d52 5206 }
f5adbfee
EBE
5207 } while (i);
5208
5209 kfree(counters_arr);
ba062d52
JM
5210}
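/*
 * Counters are freed in two phases: IDs are collected and unlinked from the
 * tracker under the lock, then __mlx4_counter_free() runs on each of them
 * with the lock dropped. The counters_arr scratch buffer is sized for the
 * device maximum, hence the kmalloc_array() above.
 */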
5211
5212static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5213{
5214 struct mlx4_priv *priv = mlx4_priv(dev);
5215 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5216 struct list_head *xrcdn_list =
5217 &tracker->slave_list[slave].res_list[RES_XRCD];
5218 struct res_xrcdn *xrcd;
5219 struct res_xrcdn *tmp;
5220 int err;
5221 int xrcdn;
5222
5223 err = move_all_busy(dev, slave, RES_XRCD);
5224 if (err)
1a91de28
JP
5225 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5226 slave);
ba062d52
JM
5227
5228 spin_lock_irq(mlx4_tlock(dev));
5229 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5230 if (xrcd->com.owner == slave) {
5231 xrcdn = xrcd->com.res_id;
4af1c048 5232 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
ba062d52
JM
5233 list_del(&xrcd->com.list);
5234 kfree(xrcd);
5235 __mlx4_xrcd_free(dev, xrcdn);
5236 }
5237 }
5238 spin_unlock_irq(mlx4_tlock(dev));
5239}
5240
c82e9aa0
EC
5241void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5242{
5243 struct mlx4_priv *priv = mlx4_priv(dev);
111c6094 5244 mlx4_reset_roce_gids(dev, slave);
c82e9aa0 5245 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4874080d 5246 rem_slave_vlans(dev, slave);
c82e9aa0 5247 rem_slave_macs(dev, slave);
80cb0021 5248 rem_slave_fs_rule(dev, slave);
c82e9aa0
EC
5249 rem_slave_qps(dev, slave);
5250 rem_slave_srqs(dev, slave);
5251 rem_slave_cqs(dev, slave);
5252 rem_slave_mrs(dev, slave);
5253 rem_slave_eqs(dev, slave);
5254 rem_slave_mtts(dev, slave);
ba062d52
JM
5255 rem_slave_counters(dev, slave);
5256 rem_slave_xrcdns(dev, slave);
c82e9aa0
EC
5257 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5258}
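/*
 * The teardown order above appears deliberate: steering state (VLANs, MACs,
 * flow rules) goes first so that no rule still references a QP, QPs are
 * torn down before the CQs/SRQs/MTTs they hold references on, and counters
 * and XRC domains are reclaimed last.
 */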
b01978ca 5259
269f9883
IS
5260static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5261 struct mlx4_vf_immed_vlan_work *work)
5262{
5263 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5264 ctx->qp_context.qos_vport = work->qos_vport;
5265}
5266
b01978ca
JM
5267void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5268{
5269 struct mlx4_vf_immed_vlan_work *work =
5270 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5271 struct mlx4_cmd_mailbox *mailbox;
5272 struct mlx4_update_qp_context *upd_context;
5273 struct mlx4_dev *dev = &work->priv->dev;
5274 struct mlx4_resource_tracker *tracker =
5275 &work->priv->mfunc.master.res_tracker;
5276 struct list_head *qp_list =
5277 &tracker->slave_list[work->slave].res_list[RES_QP];
5278 struct res_qp *qp;
5279 struct res_qp *tmp;
f0f829bf
RE
5280 u64 qp_path_mask_vlan_ctrl =
5281 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
b01978ca
JM
5282 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5283 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5284 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5285 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
f0f829bf
RE
5286 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5287
5288 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5289 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5290 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
7c3d21c8 5291 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
f0f829bf
RE
5292 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5293 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5294 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
b01978ca
JM
5295 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5296
5297 int err;
5298 int port, errors = 0;
5299 u8 vlan_control;
5300
5301 if (mlx4_is_slave(dev)) {
5302 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5303 work->slave);
5304 goto out;
5305 }
5306
5307 mailbox = mlx4_alloc_cmd_mailbox(dev);
5308 if (IS_ERR(mailbox))
5309 goto out;
0a6eac24
RE
5310 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5311 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5312 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5313 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5314 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5315 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5316 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5317 else if (!work->vlan_id)
b01978ca
JM
5318 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5319 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
7c3d21c8
MS
5320 else if (work->vlan_proto == htons(ETH_P_8021AD))
5321 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5322 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5323 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5324 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5325 else /* vst 802.1Q */
b01978ca
JM
5326 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5327 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5328 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5329
5330 upd_context = mailbox->buf;
311be98a 5331 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
b01978ca
JM
5332
5333 spin_lock_irq(mlx4_tlock(dev));
5334 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5335 spin_unlock_irq(mlx4_tlock(dev));
5336 if (qp->com.owner == work->slave) {
5337 if (qp->com.from_state != RES_QP_HW ||
5338 !qp->sched_queue || /* no INIT2RTR trans yet */
5339 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5340 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5341 spin_lock_irq(mlx4_tlock(dev));
5342 continue;
5343 }
5344 port = (qp->sched_queue >> 6 & 1) + 1;
5345 if (port != work->port) {
5346 spin_lock_irq(mlx4_tlock(dev));
5347 continue;
5348 }
f0f829bf
RE
5349 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5350 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5351 else
5352 upd_context->primary_addr_path_mask =
5353 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5354 if (work->vlan_id == MLX4_VGT) {
5355 upd_context->qp_context.param3 = qp->param3;
5356 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5357 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5358 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5359 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5360 upd_context->qp_context.pri_path.feup = qp->feup;
5361 upd_context->qp_context.pri_path.sched_queue =
5362 qp->sched_queue;
5363 } else {
5364 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5365 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5366 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5367 upd_context->qp_context.pri_path.fvl_rx =
5368 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5369 upd_context->qp_context.pri_path.fl =
7c3d21c8
MS
5370 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5371 if (work->vlan_proto == htons(ETH_P_8021AD))
5372 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5373 else
5374 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
f0f829bf
RE
5375 upd_context->qp_context.pri_path.feup =
5376 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5377 upd_context->qp_context.pri_path.sched_queue =
5378 qp->sched_queue & 0xC7;
5379 upd_context->qp_context.pri_path.sched_queue |=
5380 ((work->qos & 0x7) << 3);
269f9883
IS
5381
5382 if (dev->caps.flags2 &
5383 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5384 update_qos_vpp(upd_context, work);
f0f829bf 5385 }
b01978ca
JM
5386
5387 err = mlx4_cmd(dev, mailbox->dma,
5388 qp->local_qpn & 0xffffff,
5389 0, MLX4_CMD_UPDATE_QP,
5390 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5391 if (err) {
1a91de28
JP
5392 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5393 work->slave, port, qp->local_qpn, err);
b01978ca
JM
5394 errors++;
5395 }
5396 }
5397 spin_lock_irq(mlx4_tlock(dev));
5398 }
5399 spin_unlock_irq(mlx4_tlock(dev));
5400 mlx4_free_cmd_mailbox(dev, mailbox);
5401
5402 if (errors)
5403 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5404 errors, work->slave, work->port);
5405
5406 /* unregister previous vlan_id if needed and we had no errors
5407 * while updating the QPs
5408 */
5409 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5410 NO_INDX != work->orig_vlan_ix)
5411 __mlx4_unregister_vlan(&work->priv->dev, work->port,
2009d005 5412 work->orig_vlan_id);
b01978ca
JM
5413out:
5414 kfree(work);
5415 return;
5416}
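/*
 * The work handler above walks every RES_QP owned by the slave and issues
 * one UPDATE_QP per QP on the affected port: for MLX4_VGT it restores the
 * values snapshotted at INIT2RTR time, otherwise it forces the new VST
 * VLAN (802.1Q or 802.1ad) into the primary path. The previous VLAN is
 * unregistered only when no update failed.
 */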