drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};
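
/*
 * Illustrative note (not part of the original file): a QP normally walks
 * RES_QP_RESERVED -> RES_QP_MAPPED -> RES_QP_HW and back, one step at a
 * time; RES_QP_BUSY is only a transient state held while a transition
 * (see qp_res_start_move_to() below) is in flight.
 */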

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
        return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
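
/*
 * Illustrative sketch, not part of the original driver: the expected
 * pairing of res_tracker_insert() and res_tracker_lookup(). The tree is
 * keyed by res_common.res_id, so inserting a duplicate id fails with
 * -EEXIST. The function name below is hypothetical.
 */
static int __maybe_unused example_track_one(struct rb_root *root,
                                            struct res_common *res, u64 id)
{
        int err;

        res->res_id = id;
        err = res_tracker_insert(root, res);    /* -EEXIST on duplicate id */
        if (err)
                return err;

        /* the freshly inserted node must be found under the same key */
        return res_tracker_lookup(root, id) == res ? 0 : -EINVAL;
}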

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug purposes */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}
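
/*
 * Illustrative sketch, not part of the original driver: the accounting
 * protocol around the two helpers above. Quota is granted before the HW
 * resource is touched and released on any failure, so the per-slave
 * counters stay balanced. The function name below is hypothetical.
 */
static int __maybe_unused example_quota_alloc(struct mlx4_dev *dev,
                                              int slave, int port)
{
        int err;

        err = mlx4_grant_resource(dev, slave, RES_MAC, 1, port);
        if (err)
                return err;     /* quota exceeded or free pool exhausted */

        /* ... allocate the actual MAC here (hypothetical step); if that
         * fails, return the accounting with:
         * mlx4_release_resource(dev, slave, RES_MAC, 1, port);
         */

        return 0;
}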

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
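
/*
 * Worked example (illustrative, not part of the original file): with
 * num_instances = 1000 and 4 VFs (num_vfs + 1 == 5 functions), each
 * function gets guaranteed[vf] = 1000 / (2 * 5) = 100, so half the pool
 * (500) is guaranteed, and quota[vf] = 1000 / 2 + 100 = 600 caps what any
 * single function may take from the shared half.
 */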

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
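
/*
 * Illustrative note (not part of the original file): for RC/UC/XRC QPs on
 * an Ethernet port, update_gid() rebases the guest-supplied GID index by
 * the slave's base index and keeps the result within 7 bits. For example,
 * if mlx4_get_base_gid_ix() returns 16 for this slave and port, a guest
 * mgid_index of 3 is rewritten to 19 before the context reaches the FW.
 */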

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force vlan stripping by clearing vsd; an MLX QP refers to
                 * raw Ethernet
                 */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
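
/*
 * Illustrative sketch, not part of the original driver: get_res() parks a
 * resource in RES_ANY_BUSY so concurrent FW commands cannot race on it;
 * put_res() restores the saved state. Every successful get_res() must be
 * paired with a put_res(). The function name below is hypothetical.
 */
static int __maybe_unused example_inspect_cq(struct mlx4_dev *dev,
                                             int slave, u64 cqn)
{
        struct res_cq *cq;
        int err;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);
        if (err)
                return err;     /* -ENOENT, -EBUSY or -EPERM */

        /* ... safely examine cq here while it is marked busy ... */

        put_res(dev, slave, cqn, RES_CQ);
        return 0;
}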

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* unlink everything inserted so far before freeing it */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del_init(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
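
/*
 * Illustrative sketch, not part of the original driver: add_res_range()
 * and rem_res_range() bracket the lifetime of a block of tracked ids.
 * rem_res_range() refuses (-ENOENT/-EPERM/-EBUSY) unless every id in the
 * range is owned by the slave and in a removable state. The function name
 * below is hypothetical.
 */
static int __maybe_unused example_track_qpn_block(struct mlx4_dev *dev,
                                                  int slave, int base_qpn,
                                                  int cnt)
{
        int err;

        err = add_res_range(dev, slave, base_qpn, cnt, RES_QP, 0);
        if (err)
                return err;

        /* ... the QPs are now accounted to this slave ... */

        return rem_res_range(dev, slave, base_qpn, cnt, RES_QP, 0);
}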
1269
1270 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1271                                 enum res_qp_states state, struct res_qp **qp,
1272                                 int alloc)
1273 {
1274         struct mlx4_priv *priv = mlx4_priv(dev);
1275         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1276         struct res_qp *r;
1277         int err = 0;
1278
1279         spin_lock_irq(mlx4_tlock(dev));
1280         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1281         if (!r)
1282                 err = -ENOENT;
1283         else if (r->com.owner != slave)
1284                 err = -EPERM;
1285         else {
1286                 switch (state) {
1287                 case RES_QP_BUSY:
1288                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1289                                  __func__, r->com.res_id);
1290                         err = -EBUSY;
1291                         break;
1292
1293                 case RES_QP_RESERVED:
1294                         if (r->com.state == RES_QP_MAPPED && !alloc)
1295                                 break;
1296
1297                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1298                         err = -EINVAL;
1299                         break;
1300
1301                 case RES_QP_MAPPED:
1302                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1303                             r->com.state == RES_QP_HW)
1304                                 break;
1305                         else {
1306                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1307                                           r->com.res_id);
1308                                 err = -EINVAL;
1309                         }
1310
1311                         break;
1312
1313                 case RES_QP_HW:
1314                         if (r->com.state != RES_QP_MAPPED)
1315                                 err = -EINVAL;
1316                         break;
1317                 default:
1318                         err = -EINVAL;
1319                 }
1320
1321                 if (!err) {
1322                         r->com.from_state = r->com.state;
1323                         r->com.to_state = state;
1324                         r->com.state = RES_QP_BUSY;
1325                         if (qp)
1326                                 *qp = r;
1327                 }
1328         }
1329
1330         spin_unlock_irq(mlx4_tlock(dev));
1331
1332         return err;
1333 }
1334
1335 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1336                                 enum res_mpt_states state, struct res_mpt **mpt)
1337 {
1338         struct mlx4_priv *priv = mlx4_priv(dev);
1339         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1340         struct res_mpt *r;
1341         int err = 0;
1342
1343         spin_lock_irq(mlx4_tlock(dev));
1344         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1345         if (!r)
1346                 err = -ENOENT;
1347         else if (r->com.owner != slave)
1348                 err = -EPERM;
1349         else {
1350                 switch (state) {
1351                 case RES_MPT_BUSY:
1352                         err = -EINVAL;
1353                         break;
1354
1355                 case RES_MPT_RESERVED:
1356                         if (r->com.state != RES_MPT_MAPPED)
1357                                 err = -EINVAL;
1358                         break;
1359
1360                 case RES_MPT_MAPPED:
1361                         if (r->com.state != RES_MPT_RESERVED &&
1362                             r->com.state != RES_MPT_HW)
1363                                 err = -EINVAL;
1364                         break;
1365
1366                 case RES_MPT_HW:
1367                         if (r->com.state != RES_MPT_MAPPED)
1368                                 err = -EINVAL;
1369                         break;
1370                 default:
1371                         err = -EINVAL;
1372                 }
1373
1374                 if (!err) {
1375                         r->com.from_state = r->com.state;
1376                         r->com.to_state = state;
1377                         r->com.state = RES_MPT_BUSY;
1378                         if (mpt)
1379                                 *mpt = r;
1380                 }
1381         }
1382
1383         spin_unlock_irq(mlx4_tlock(dev));
1384
1385         return err;
1386 }
1387
1388 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1389                                 enum res_eq_states state, struct res_eq **eq)
1390 {
1391         struct mlx4_priv *priv = mlx4_priv(dev);
1392         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1393         struct res_eq *r;
1394         int err = 0;
1395
1396         spin_lock_irq(mlx4_tlock(dev));
1397         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1398         if (!r)
1399                 err = -ENOENT;
1400         else if (r->com.owner != slave)
1401                 err = -EPERM;
1402         else {
1403                 switch (state) {
1404                 case RES_EQ_BUSY:
1405                         err = -EINVAL;
1406                         break;
1407
1408                 case RES_EQ_RESERVED:
1409                         if (r->com.state != RES_EQ_HW)
1410                                 err = -EINVAL;
1411                         break;
1412
1413                 case RES_EQ_HW:
1414                         if (r->com.state != RES_EQ_RESERVED)
1415                                 err = -EINVAL;
1416                         break;
1417
1418                 default:
1419                         err = -EINVAL;
1420                 }
1421
1422                 if (!err) {
1423                         r->com.from_state = r->com.state;
1424                         r->com.to_state = state;
1425                         r->com.state = RES_EQ_BUSY;
1426                         if (eq)
1427                                 *eq = r;
1428                 }
1429         }
1430
1431         spin_unlock_irq(mlx4_tlock(dev));
1432
1433         return err;
1434 }
1435
1436 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1437                                 enum res_cq_states state, struct res_cq **cq)
1438 {
1439         struct mlx4_priv *priv = mlx4_priv(dev);
1440         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1441         struct res_cq *r;
1442         int err;
1443
1444         spin_lock_irq(mlx4_tlock(dev));
1445         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1446         if (!r) {
1447                 err = -ENOENT;
1448         } else if (r->com.owner != slave) {
1449                 err = -EPERM;
1450         } else if (state == RES_CQ_ALLOCATED) {
1451                 if (r->com.state != RES_CQ_HW)
1452                         err = -EINVAL;
1453                 else if (atomic_read(&r->ref_count))
1454                         err = -EBUSY;
1455                 else
1456                         err = 0;
1457         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1458                 err = -EINVAL;
1459         } else {
1460                 err = 0;
1461         }
1462
1463         if (!err) {
1464                 r->com.from_state = r->com.state;
1465                 r->com.to_state = state;
1466                 r->com.state = RES_CQ_BUSY;
1467                 if (cq)
1468                         *cq = r;
1469         }
1470
1471         spin_unlock_irq(mlx4_tlock(dev));
1472
1473         return err;
1474 }
1475
1476 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1477                                  enum res_srq_states state, struct res_srq **srq)
1478 {
1479         struct mlx4_priv *priv = mlx4_priv(dev);
1480         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1481         struct res_srq *r;
1482         int err = 0;
1483
1484         spin_lock_irq(mlx4_tlock(dev));
1485         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1486         if (!r) {
1487                 err = -ENOENT;
1488         } else if (r->com.owner != slave) {
1489                 err = -EPERM;
1490         } else if (state == RES_SRQ_ALLOCATED) {
1491                 if (r->com.state != RES_SRQ_HW)
1492                         err = -EINVAL;
1493                 else if (atomic_read(&r->ref_count))
1494                         err = -EBUSY;
1495         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1496                 err = -EINVAL;
1497         }
1498
1499         if (!err) {
1500                 r->com.from_state = r->com.state;
1501                 r->com.to_state = state;
1502                 r->com.state = RES_SRQ_BUSY;
1503                 if (srq)
1504                         *srq = r;
1505         }
1506
1507         spin_unlock_irq(mlx4_tlock(dev));
1508
1509         return err;
1510 }
1511
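     /* Resource moves are two-phase: a *_res_start_move_to() helper
      * latches from_state/to_state and parks the entry in its BUSY
      * state; the caller then either commits the move with
      * res_end_move() or rolls it back with res_abort_move().
      */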
1512 static void res_abort_move(struct mlx4_dev *dev, int slave,
1513                            enum mlx4_resource type, int id)
1514 {
1515         struct mlx4_priv *priv = mlx4_priv(dev);
1516         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1517         struct res_common *r;
1518
1519         spin_lock_irq(mlx4_tlock(dev));
1520         r = res_tracker_lookup(&tracker->res_tree[type], id);
1521         if (r && (r->owner == slave))
1522                 r->state = r->from_state;
1523         spin_unlock_irq(mlx4_tlock(dev));
1524 }
1525
1526 static void res_end_move(struct mlx4_dev *dev, int slave,
1527                          enum mlx4_resource type, int id)
1528 {
1529         struct mlx4_priv *priv = mlx4_priv(dev);
1530         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1531         struct res_common *r;
1532
1533         spin_lock_irq(mlx4_tlock(dev));
1534         r = res_tracker_lookup(&tracker->res_tree[type], id);
1535         if (r && (r->owner == slave))
1536                 r->state = r->to_state;
1537         spin_unlock_irq(mlx4_tlock(dev));
1538 }
1539
1540 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1541 {
1542         return mlx4_is_qp_reserved(dev, qpn) &&
1543                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1544 }
1545
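     /* QPs below the FW-reserved region boundary have their ICM managed
      * outside of this tracker, so the MAP_ICM and free paths below skip
      * ICM allocation and freeing for them.
      */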
1546 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1547 {
1548         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1549 }
1550
1551 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1552                         u64 in_param, u64 *out_param)
1553 {
1554         int err;
1555         int count;
1556         int align;
1557         int base;
1558         int qpn;
1559         u8 flags;
1560
1561         switch (op) {
1562         case RES_OP_RESERVE:
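                     /* in_param packing (decoded below): the low dword holds
                      * flags[31:24] | count[23:0] and the high dword holds the
                      * alignment, e.g. a request for 8 QPs aligned to 8 with
                      * no flags arrives as in_param = (8ULL << 32) | 8.
                      */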
1563                 count = get_param_l(&in_param) & 0xffffff;
1564                 /* Turn off all unsupported QP allocation flags that the
1565                  * slave tries to set.
1566                  */
1567                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1568                 align = get_param_h(&in_param);
1569                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1570                 if (err)
1571                         return err;
1572
1573                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1574                 if (err) {
1575                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1576                         return err;
1577                 }
1578
1579                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1580                 if (err) {
1581                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1582                         __mlx4_qp_release_range(dev, base, count);
1583                         return err;
1584                 }
1585                 set_param_l(out_param, base);
1586                 break;
1587         case RES_OP_MAP_ICM:
1588                 qpn = get_param_l(&in_param) & 0x7fffff;
1589                 if (valid_reserved(dev, slave, qpn)) {
1590                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1591                         if (err)
1592                                 return err;
1593                 }
1594
1595                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1596                                            NULL, 1);
1597                 if (err)
1598                         return err;
1599
1600                 if (!fw_reserved(dev, qpn)) {
1601                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1602                         if (err) {
1603                                 res_abort_move(dev, slave, RES_QP, qpn);
1604                                 return err;
1605                         }
1606                 }
1607
1608                 res_end_move(dev, slave, RES_QP, qpn);
1609                 break;
1610
1611         default:
1612                 err = -EINVAL;
1613                 break;
1614         }
1615         return err;
1616 }
1617
1618 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1619                          u64 in_param, u64 *out_param)
1620 {
1621         int err = -EINVAL;
1622         int base;
1623         int order;
1624
1625         if (op != RES_OP_RESERVE_AND_MAP)
1626                 return err;
1627
1628         order = get_param_l(&in_param);
1629
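             /* order is log2 of the MTT segment size: the quota accounting
              * below charges 1 << order entries, while the tracker records a
              * single range of that order.
              */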
1630         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1631         if (err)
1632                 return err;
1633
1634         base = __mlx4_alloc_mtt_range(dev, order);
1635         if (base == -1) {
1636                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1637                 return -ENOMEM;
1638         }
1639
1640         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1641         if (err) {
1642                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1643                 __mlx4_free_mtt_range(dev, base, order);
1644         } else {
1645                 set_param_l(out_param, base);
1646         }
1647
1648         return err;
1649 }
1650
1651 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1652                          u64 in_param, u64 *out_param)
1653 {
1654         int err = -EINVAL;
1655         int index;
1656         int id;
1657         struct res_mpt *mpt;
1658
1659         switch (op) {
1660         case RES_OP_RESERVE:
1661                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1662                 if (err)
1663                         break;
1664
1665                 index = __mlx4_mpt_reserve(dev);
1666                 if (index == -1) {
1667                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1668                         break;
1669                 }
1670                 id = index & mpt_mask(dev);
1671
1672                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1673                 if (err) {
1674                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1675                         __mlx4_mpt_release(dev, index);
1676                         break;
1677                 }
1678                 set_param_l(out_param, index);
1679                 break;
1680         case RES_OP_MAP_ICM:
1681                 index = get_param_l(&in_param);
1682                 id = index & mpt_mask(dev);
1683                 err = mr_res_start_move_to(dev, slave, id,
1684                                            RES_MPT_MAPPED, &mpt);
1685                 if (err)
1686                         return err;
1687
1688                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1689                 if (err) {
1690                         res_abort_move(dev, slave, RES_MPT, id);
1691                         return err;
1692                 }
1693
1694                 res_end_move(dev, slave, RES_MPT, id);
1695                 break;
1696         }
1697         return err;
1698 }
1699
1700 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1701                         u64 in_param, u64 *out_param)
1702 {
1703         int cqn;
1704         int err;
1705
1706         switch (op) {
1707         case RES_OP_RESERVE_AND_MAP:
1708                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1709                 if (err)
1710                         break;
1711
1712                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1713                 if (err) {
1714                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1715                         break;
1716                 }
1717
1718                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1719                 if (err) {
1720                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1721                         __mlx4_cq_free_icm(dev, cqn);
1722                         break;
1723                 }
1724
1725                 set_param_l(out_param, cqn);
1726                 break;
1727
1728         default:
1729                 err = -EINVAL;
1730         }
1731
1732         return err;
1733 }
1734
1735 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1736                          u64 in_param, u64 *out_param)
1737 {
1738         int srqn;
1739         int err;
1740
1741         switch (op) {
1742         case RES_OP_RESERVE_AND_MAP:
1743                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1744                 if (err)
1745                         break;
1746
1747                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1748                 if (err) {
1749                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1750                         break;
1751                 }
1752
1753                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1754                 if (err) {
1755                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1756                         __mlx4_srq_free_icm(dev, srqn);
1757                         break;
1758                 }
1759
1760                 set_param_l(out_param, srqn);
1761                 break;
1762
1763         default:
1764                 err = -EINVAL;
1765         }
1766
1767         return err;
1768 }
1769
1770 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1771                                      u8 smac_index, u64 *mac)
1772 {
1773         struct mlx4_priv *priv = mlx4_priv(dev);
1774         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1775         struct list_head *mac_list =
1776                 &tracker->slave_list[slave].res_list[RES_MAC];
1777         struct mac_res *res, *tmp;
1778
1779         list_for_each_entry_safe(res, tmp, mac_list, list) {
1780                 if (res->smac_index == smac_index && res->port == (u8) port) {
1781                         *mac = res->mac;
1782                         return 0;
1783                 }
1784         }
1785         return -ENOENT;
1786 }
1787
1788 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1789 {
1790         struct mlx4_priv *priv = mlx4_priv(dev);
1791         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1792         struct list_head *mac_list =
1793                 &tracker->slave_list[slave].res_list[RES_MAC];
1794         struct mac_res *res, *tmp;
1795
1796         list_for_each_entry_safe(res, tmp, mac_list, list) {
1797                 if (res->mac == mac && res->port == (u8) port) {
1798                         /* mac found. update ref count */
1799                         ++res->ref_count;
1800                         return 0;
1801                 }
1802         }
1803
1804         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1805                 return -EINVAL;
1806         res = kzalloc(sizeof(*res), GFP_KERNEL);
1807         if (!res) {
1808                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1809                 return -ENOMEM;
1810         }
1811         res->mac = mac;
1812         res->port = (u8) port;
1813         res->smac_index = smac_index;
1814         res->ref_count = 1;
1815         list_add_tail(&res->list,
1816                       &tracker->slave_list[slave].res_list[RES_MAC]);
1817         return 0;
1818 }
1819
1820 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1821                                int port)
1822 {
1823         struct mlx4_priv *priv = mlx4_priv(dev);
1824         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1825         struct list_head *mac_list =
1826                 &tracker->slave_list[slave].res_list[RES_MAC];
1827         struct mac_res *res, *tmp;
1828
1829         list_for_each_entry_safe(res, tmp, mac_list, list) {
1830                 if (res->mac == mac && res->port == (u8) port) {
1831                         if (!--res->ref_count) {
1832                                 list_del(&res->list);
1833                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1834                                 kfree(res);
1835                         }
1836                         break;
1837                 }
1838         }
1839 }
1840
1841 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1842 {
1843         struct mlx4_priv *priv = mlx4_priv(dev);
1844         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1845         struct list_head *mac_list =
1846                 &tracker->slave_list[slave].res_list[RES_MAC];
1847         struct mac_res *res, *tmp;
1848         int i;
1849
1850         list_for_each_entry_safe(res, tmp, mac_list, list) {
1851                 list_del(&res->list);
1852                 /* unregister the MAC as many times as the slave referenced it */
1853                 for (i = 0; i < res->ref_count; i++)
1854                         __mlx4_unregister_mac(dev, res->port, res->mac);
1855                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1856                 kfree(res);
1857         }
1858 }
1859
1860 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1861                          u64 in_param, u64 *out_param, int in_port)
1862 {
1863         int err = -EINVAL;
1864         int port;
1865         u64 mac;
1866         u8 smac_index;
1867
1868         if (op != RES_OP_RESERVE_AND_MAP)
1869                 return err;
1870
1871         port = !in_port ? get_param_l(out_param) : in_port;
1872         port = mlx4_slave_convert_port(dev, slave, port);
1874
1875         if (port < 0)
1876                 return -EINVAL;
1877         mac = in_param;
1878
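             /* on success __mlx4_register_mac() returns the MAC-table index,
              * which is passed back to the slave and recorded as smac_index
              */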
1879         err = __mlx4_register_mac(dev, port, mac);
1880         if (err >= 0) {
1881                 smac_index = err;
1882                 set_param_l(out_param, err);
1883                 err = 0;
1884         }
1885
1886         if (!err) {
1887                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1888                 if (err)
1889                         __mlx4_unregister_mac(dev, port, mac);
1890         }
1891         return err;
1892 }
1893
1894 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1895                              int port, int vlan_index)
1896 {
1897         struct mlx4_priv *priv = mlx4_priv(dev);
1898         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1899         struct list_head *vlan_list =
1900                 &tracker->slave_list[slave].res_list[RES_VLAN];
1901         struct vlan_res *res, *tmp;
1902
1903         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1904                 if (res->vlan == vlan && res->port == (u8) port) {
1905                         /* vlan found. update ref count */
1906                         ++res->ref_count;
1907                         return 0;
1908                 }
1909         }
1910
1911         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1912                 return -EINVAL;
1913         res = kzalloc(sizeof(*res), GFP_KERNEL);
1914         if (!res) {
1915                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1916                 return -ENOMEM;
1917         }
1918         res->vlan = vlan;
1919         res->port = (u8) port;
1920         res->vlan_index = vlan_index;
1921         res->ref_count = 1;
1922         list_add_tail(&res->list,
1923                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1924         return 0;
1925 }
1926
1928 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1929                                 int port)
1930 {
1931         struct mlx4_priv *priv = mlx4_priv(dev);
1932         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1933         struct list_head *vlan_list =
1934                 &tracker->slave_list[slave].res_list[RES_VLAN];
1935         struct vlan_res *res, *tmp;
1936
1937         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1938                 if (res->vlan == vlan && res->port == (u8) port) {
1939                         if (!--res->ref_count) {
1940                                 list_del(&res->list);
1941                                 mlx4_release_resource(dev, slave, RES_VLAN,
1942                                                       1, port);
1943                                 kfree(res);
1944                         }
1945                         break;
1946                 }
1947         }
1948 }
1949
1950 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1951 {
1952         struct mlx4_priv *priv = mlx4_priv(dev);
1953         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1954         struct list_head *vlan_list =
1955                 &tracker->slave_list[slave].res_list[RES_VLAN];
1956         struct vlan_res *res, *tmp;
1957         int i;
1958
1959         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1960                 list_del(&res->list);
1961                 /* unregister the VLAN as many times as the slave referenced it */
1962                 for (i = 0; i < res->ref_count; i++)
1963                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1964                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1965                 kfree(res);
1966         }
1967 }
1968
1969 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1970                           u64 in_param, u64 *out_param, int in_port)
1971 {
1972         struct mlx4_priv *priv = mlx4_priv(dev);
1973         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1974         int err;
1975         u16 vlan;
1976         int vlan_index;
1977         int port;
1978
1979         port = !in_port ? get_param_l(out_param) : in_port;
1980
1981         if (!port || op != RES_OP_RESERVE_AND_MAP)
1982                 return -EINVAL;
1983
1984         port = mlx4_slave_convert_port(dev, slave, port);
1986
1987         if (port < 0)
1988                 return -EINVAL;
1989         /* slaves using the old vlan API expect reg/unreg vlan to be a NOP; keep it so */
1990         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1991                 slave_state[slave].old_vlan_api = true;
1992                 return 0;
1993         }
1994
1995         vlan = (u16) in_param;
1996
1997         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1998         if (!err) {
1999                 set_param_l(out_param, (u32) vlan_index);
2000                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2001                 if (err)
2002                         __mlx4_unregister_vlan(dev, port, vlan);
2003         }
2004         return err;
2005 }
2006
2007 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2008                              u64 in_param, u64 *out_param)
2009 {
2010         u32 index;
2011         int err;
2012
2013         if (op != RES_OP_RESERVE)
2014                 return -EINVAL;
2015
2016         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2017         if (err)
2018                 return err;
2019
2020         err = __mlx4_counter_alloc(dev, &index);
2021         if (err) {
2022                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2023                 return err;
2024         }
2025
2026         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2027         if (err) {
2028                 __mlx4_counter_free(dev, index);
2029                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2030         } else {
2031                 set_param_l(out_param, index);
2032         }
2033
2034         return err;
2035 }
2036
2037 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2038                            u64 in_param, u64 *out_param)
2039 {
2040         u32 xrcdn;
2041         int err;
2042
2043         if (op != RES_OP_RESERVE)
2044                 return -EINVAL;
2045
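             /* unlike counters, XRC domains are not charged against the
              * slave's quota, so there is no grant/release step here
              */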
2046         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2047         if (err)
2048                 return err;
2049
2050         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2051         if (err)
2052                 __mlx4_xrcd_free(dev, xrcdn);
2053         else
2054                 set_param_l(out_param, xrcdn);
2055
2056         return err;
2057 }
2058
2059 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2060                            struct mlx4_vhcr *vhcr,
2061                            struct mlx4_cmd_mailbox *inbox,
2062                            struct mlx4_cmd_mailbox *outbox,
2063                            struct mlx4_cmd_info *cmd)
2064 {
2065         int err;
2066         int alop = vhcr->op_modifier;
2067
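             /* the low byte of in_modifier selects the resource type; for
              * MAC and VLAN the next byte carries the port number
              */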
2068         switch (vhcr->in_modifier & 0xFF) {
2069         case RES_QP:
2070                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2071                                    vhcr->in_param, &vhcr->out_param);
2072                 break;
2073
2074         case RES_MTT:
2075                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2076                                     vhcr->in_param, &vhcr->out_param);
2077                 break;
2078
2079         case RES_MPT:
2080                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2081                                     vhcr->in_param, &vhcr->out_param);
2082                 break;
2083
2084         case RES_CQ:
2085                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2086                                    vhcr->in_param, &vhcr->out_param);
2087                 break;
2088
2089         case RES_SRQ:
2090                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2091                                     vhcr->in_param, &vhcr->out_param);
2092                 break;
2093
2094         case RES_MAC:
2095                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2096                                     vhcr->in_param, &vhcr->out_param,
2097                                     (vhcr->in_modifier >> 8) & 0xFF);
2098                 break;
2099
2100         case RES_VLAN:
2101                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2102                                      vhcr->in_param, &vhcr->out_param,
2103                                      (vhcr->in_modifier >> 8) & 0xFF);
2104                 break;
2105
2106         case RES_COUNTER:
2107                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2108                                         vhcr->in_param, &vhcr->out_param);
2109                 break;
2110
2111         case RES_XRCD:
2112                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2113                                       vhcr->in_param, &vhcr->out_param);
2114                 break;
2115
2116         default:
2117                 err = -EINVAL;
2118                 break;
2119         }
2120
2121         return err;
2122 }
2123
2124 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2125                        u64 in_param)
2126 {
2127         int err;
2128         int count;
2129         int base;
2130         int qpn;
2131
2132         switch (op) {
2133         case RES_OP_RESERVE:
2134                 base = get_param_l(&in_param) & 0x7fffff;
2135                 count = get_param_h(&in_param);
2136                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2137                 if (err)
2138                         break;
2139                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2140                 __mlx4_qp_release_range(dev, base, count);
2141                 break;
2142         case RES_OP_MAP_ICM:
2143                 qpn = get_param_l(&in_param) & 0x7fffff;
2144                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2145                                            NULL, 0);
2146                 if (err)
2147                         return err;
2148
2149                 if (!fw_reserved(dev, qpn))
2150                         __mlx4_qp_free_icm(dev, qpn);
2151
2152                 res_end_move(dev, slave, RES_QP, qpn);
2153
2154                 if (valid_reserved(dev, slave, qpn))
2155                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2156                 break;
2157         default:
2158                 err = -EINVAL;
2159                 break;
2160         }
2161         return err;
2162 }
2163
2164 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2165                         u64 in_param, u64 *out_param)
2166 {
2167         int err = -EINVAL;
2168         int base;
2169         int order;
2170
2171         if (op != RES_OP_RESERVE_AND_MAP)
2172                 return err;
2173
2174         base = get_param_l(&in_param);
2175         order = get_param_h(&in_param);
2176         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2177         if (!err) {
2178                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2179                 __mlx4_free_mtt_range(dev, base, order);
2180         }
2181         return err;
2182 }
2183
2184 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2185                         u64 in_param)
2186 {
2187         int err = -EINVAL;
2188         int index;
2189         int id;
2190         struct res_mpt *mpt;
2191
2192         switch (op) {
2193         case RES_OP_RESERVE:
2194                 index = get_param_l(&in_param);
2195                 id = index & mpt_mask(dev);
2196                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2197                 if (err)
2198                         break;
2199                 index = mpt->key;
2200                 put_res(dev, slave, id, RES_MPT);
2201
2202                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2203                 if (err)
2204                         break;
2205                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2206                 __mlx4_mpt_release(dev, index);
2207                 break;
2208         case RES_OP_MAP_ICM:
2209                 index = get_param_l(&in_param);
2210                 id = index & mpt_mask(dev);
2211                 err = mr_res_start_move_to(dev, slave, id,
2212                                            RES_MPT_RESERVED, &mpt);
2213                 if (err)
2214                         return err;
2215
2216                 __mlx4_mpt_free_icm(dev, mpt->key);
2217                 res_end_move(dev, slave, RES_MPT, id);
2218                 return err;
2220         default:
2221                 err = -EINVAL;
2222                 break;
2223         }
2224         return err;
2225 }
2226
2227 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2228                        u64 in_param, u64 *out_param)
2229 {
2230         int cqn;
2231         int err;
2232
2233         switch (op) {
2234         case RES_OP_RESERVE_AND_MAP:
2235                 cqn = get_param_l(&in_param);
2236                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2237                 if (err)
2238                         break;
2239
2240                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2241                 __mlx4_cq_free_icm(dev, cqn);
2242                 break;
2243
2244         default:
2245                 err = -EINVAL;
2246                 break;
2247         }
2248
2249         return err;
2250 }
2251
2252 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2253                         u64 in_param, u64 *out_param)
2254 {
2255         int srqn;
2256         int err;
2257
2258         switch (op) {
2259         case RES_OP_RESERVE_AND_MAP:
2260                 srqn = get_param_l(&in_param);
2261                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2262                 if (err)
2263                         break;
2264
2265                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2266                 __mlx4_srq_free_icm(dev, srqn);
2267                 break;
2268
2269         default:
2270                 err = -EINVAL;
2271                 break;
2272         }
2273
2274         return err;
2275 }
2276
2277 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2278                             u64 in_param, u64 *out_param, int in_port)
2279 {
2280         int port;
2281         int err = 0;
2282
2283         switch (op) {
2284         case RES_OP_RESERVE_AND_MAP:
2285                 port = !in_port ? get_param_l(out_param) : in_port;
2286                 port = mlx4_slave_convert_port(dev, slave, port);
2288
2289                 if (port < 0)
2290                         return -EINVAL;
2291                 mac_del_from_slave(dev, slave, in_param, port);
2292                 __mlx4_unregister_mac(dev, port, in_param);
2293                 break;
2294         default:
2295                 err = -EINVAL;
2296                 break;
2297         }
2298
2299         return err;
2300
2301 }
2302
2303 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2304                             u64 in_param, u64 *out_param, int port)
2305 {
2306         struct mlx4_priv *priv = mlx4_priv(dev);
2307         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2308         int err = 0;
2309
2310         port = mlx4_slave_convert_port(dev, slave, port);
2312
2313         if (port < 0)
2314                 return -EINVAL;
2315         switch (op) {
2316         case RES_OP_RESERVE_AND_MAP:
2317                 if (slave_state[slave].old_vlan_api)
2318                         return 0;
2319                 if (!port)
2320                         return -EINVAL;
2321                 vlan_del_from_slave(dev, slave, in_param, port);
2322                 __mlx4_unregister_vlan(dev, port, in_param);
2323                 break;
2324         default:
2325                 err = -EINVAL;
2326                 break;
2327         }
2328
2329         return err;
2330 }
2331
2332 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2333                             u64 in_param, u64 *out_param)
2334 {
2335         int index;
2336         int err;
2337
2338         if (op != RES_OP_RESERVE)
2339                 return -EINVAL;
2340
2341         index = get_param_l(&in_param);
2342         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2343         if (err)
2344                 return err;
2345
2346         __mlx4_counter_free(dev, index);
2347         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2348
2349         return err;
2350 }
2351
2352 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2353                           u64 in_param, u64 *out_param)
2354 {
2355         int xrcdn;
2356         int err;
2357
2358         if (op != RES_OP_RESERVE)
2359                 return -EINVAL;
2360
2361         xrcdn = get_param_l(&in_param);
2362         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2363         if (err)
2364                 return err;
2365
2366         __mlx4_xrcd_free(dev, xrcdn);
2367
2368         return err;
2369 }
2370
2371 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2372                           struct mlx4_vhcr *vhcr,
2373                           struct mlx4_cmd_mailbox *inbox,
2374                           struct mlx4_cmd_mailbox *outbox,
2375                           struct mlx4_cmd_info *cmd)
2376 {
2377         int err = -EINVAL;
2378         int alop = vhcr->op_modifier;
2379
2380         switch (vhcr->in_modifier & 0xFF) {
2381         case RES_QP:
2382                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2383                                   vhcr->in_param);
2384                 break;
2385
2386         case RES_MTT:
2387                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2388                                    vhcr->in_param, &vhcr->out_param);
2389                 break;
2390
2391         case RES_MPT:
2392                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2393                                    vhcr->in_param);
2394                 break;
2395
2396         case RES_CQ:
2397                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2398                                   vhcr->in_param, &vhcr->out_param);
2399                 break;
2400
2401         case RES_SRQ:
2402                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2403                                    vhcr->in_param, &vhcr->out_param);
2404                 break;
2405
2406         case RES_MAC:
2407                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2408                                    vhcr->in_param, &vhcr->out_param,
2409                                    (vhcr->in_modifier >> 8) & 0xFF);
2410                 break;
2411
2412         case RES_VLAN:
2413                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2414                                     vhcr->in_param, &vhcr->out_param,
2415                                     (vhcr->in_modifier >> 8) & 0xFF);
2416                 break;
2417
2418         case RES_COUNTER:
2419                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2420                                        vhcr->in_param, &vhcr->out_param);
2421                 break;
2422
2423         case RES_XRCD:
2424                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2425                                      vhcr->in_param, &vhcr->out_param);
2426                 break;

2427         default:
2428                 break;
2429         }
2430         return err;
2431 }
2432
2433 /* ugly but other choices are uglier */
2434 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2435 {
2436         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2437 }
2438
2439 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2440 {
2441         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2442 }
2443
2444 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2445 {
2446         return be32_to_cpu(mpt->mtt_sz);
2447 }
2448
2449 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2450 {
2451         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2452 }
2453
2454 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2455 {
2456         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2457 }
2458
2459 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2460 {
2461         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2462 }
2463
2464 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2465 {
2466         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2467 }
2468
2469 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2470 {
2471         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2472 }
2473
2474 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2475 {
2476         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2477 }
2478
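     /* Compute how many MTT pages a QP's work queues need: the SQ and RQ
      * byte sizes are derived from the context (the RQ is absent for
      * SRQ/RSS/XRC QPs), offset by the page offset and rounded up to a
      * power-of-two page count. E.g. with 4K pages (log_page_size == 0),
      * log_sq_size == 6 and log_sq_stride == 2, the SQ alone occupies
      * 1 << (6 + 2 + 4) == 4096 bytes, i.e. a single page.
      */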
2479 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2480 {
2481         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2482         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2483         int log_sq_stride = qpc->sq_size_stride & 7;
2484         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2485         int log_rq_stride = qpc->rq_size_stride & 7;
2486         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2487         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2488         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2489         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2490         int sq_size;
2491         int rq_size;
2492         int total_pages;
2493         int total_mem;
2494         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2495
2496         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2497         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2498         total_mem = sq_size + rq_size;
2499         total_pages =
2500                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2501                                    page_shift);
2502
2503         return total_pages;
2504 }
2505
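     /* Verify that [start, start + size) lies entirely within the MTT
      * range reserved for this slave.
      */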
2506 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2507                            int size, struct res_mtt *mtt)
2508 {
2509         int res_start = mtt->com.res_id;
2510         int res_size = (1 << mtt->order);
2511
2512         if (start < res_start || start + size > res_start + res_size)
2513                 return -EPERM;
2514         return 0;
2515 }
2516
2517 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2518                            struct mlx4_vhcr *vhcr,
2519                            struct mlx4_cmd_mailbox *inbox,
2520                            struct mlx4_cmd_mailbox *outbox,
2521                            struct mlx4_cmd_info *cmd)
2522 {
2523         int err;
2524         int index = vhcr->in_modifier;
2525         struct res_mtt *mtt;
2526         struct res_mpt *mpt;
2527         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2528         int phys;
2529         int id;
2530         u32 pd;
2531         int pd_slave;
2532
2533         id = index & mpt_mask(dev);
2534         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2535         if (err)
2536                 return err;
2537
2538         /* Disable memory windows for VFs. */
2539         if (!mr_is_region(inbox->buf)) {
2540                 err = -EPERM;
2541                 goto ex_abort;
2542         }
2543
2544         /* the PD's function field (bits 17-23) must be 0 or this slave + 1 */
2545         pd = mr_get_pd(inbox->buf);
2546         pd_slave = (pd >> 17) & 0x7f;
2547         if (pd_slave != 0 && --pd_slave != slave) {
2548                 err = -EPERM;
2549                 goto ex_abort;
2550         }
2551
2552         if (mr_is_fmr(inbox->buf)) {
2553                 /* an FMR with bind enabled is forbidden for slaves */
2554                 if (mr_is_bind_enabled(inbox->buf)) {
2555                         err = -EPERM;
2556                         goto ex_abort;
2557                 }
2558                 /* an FMR memory window is likewise forbidden */
2559                 if (!mr_is_region(inbox->buf)) {
2560                         err = -EPERM;
2561                         goto ex_abort;
2562                 }
2563         }
2564
2565         phys = mr_phys_mpt(inbox->buf);
2566         if (!phys) {
2567                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2568                 if (err)
2569                         goto ex_abort;
2570
2571                 err = check_mtt_range(dev, slave, mtt_base,
2572                                       mr_get_mtt_size(inbox->buf), mtt);
2573                 if (err)
2574                         goto ex_put;
2575
2576                 mpt->mtt = mtt;
2577         }
2578
2579         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2580         if (err)
2581                 goto ex_put;
2582
2583         if (!phys) {
2584                 atomic_inc(&mtt->ref_count);
2585                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2586         }
2587
2588         res_end_move(dev, slave, RES_MPT, id);
2589         return 0;
2590
2591 ex_put:
2592         if (!phys)
2593                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2594 ex_abort:
2595         res_abort_move(dev, slave, RES_MPT, id);
2596
2597         return err;
2598 }
2599
2600 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2601                            struct mlx4_vhcr *vhcr,
2602                            struct mlx4_cmd_mailbox *inbox,
2603                            struct mlx4_cmd_mailbox *outbox,
2604                            struct mlx4_cmd_info *cmd)
2605 {
2606         int err;
2607         int index = vhcr->in_modifier;
2608         struct res_mpt *mpt;
2609         int id;
2610
2611         id = index & mpt_mask(dev);
2612         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2613         if (err)
2614                 return err;
2615
2616         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2617         if (err)
2618                 goto ex_abort;
2619
2620         if (mpt->mtt)
2621                 atomic_dec(&mpt->mtt->ref_count);
2622
2623         res_end_move(dev, slave, RES_MPT, id);
2624         return 0;
2625
2626 ex_abort:
2627         res_abort_move(dev, slave, RES_MPT, id);
2628
2629         return err;
2630 }
2631
2632 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2633                            struct mlx4_vhcr *vhcr,
2634                            struct mlx4_cmd_mailbox *inbox,
2635                            struct mlx4_cmd_mailbox *outbox,
2636                            struct mlx4_cmd_info *cmd)
2637 {
2638         int err;
2639         int index = vhcr->in_modifier;
2640         struct res_mpt *mpt;
2641         int id;
2642
2643         id = index & mpt_mask(dev);
2644         err = get_res(dev, slave, id, RES_MPT, &mpt);
2645         if (err)
2646                 return err;
2647
2648         if (mpt->com.from_state == RES_MPT_MAPPED) {
2649                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2650                  * that, the VF must read the MPT. But since the MPT entry memory is not
2651                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2652                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2653                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2654                  * ownership following the change. The change here allows the VF to
2655                  * perform QUERY_MPT also when the entry is in SW ownership.
2656                  */
2657                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2658                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2659                                         mpt->key, NULL);
2660
2661                 if (!mpt_entry || !outbox->buf) {
2662                         err = -EINVAL;
2663                         goto out;
2664                 }
2665
2666                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2667
2668                 err = 0;
2669         } else if (mpt->com.from_state == RES_MPT_HW) {
2670                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2671         } else {
2672                 err = -EBUSY;
2673                 goto out;
2674         }
2675
2677 out:
2678         put_res(dev, slave, id, RES_MPT);
2679         return err;
2680 }
2681
2682 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2683 {
2684         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2685 }
2686
2687 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2688 {
2689         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2690 }
2691
2692 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2693 {
2694         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2695 }
2696
2697 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2698                                   struct mlx4_qp_context *context)
2699 {
2700         u32 qpn = vhcr->in_modifier & 0xffffff;
2701         u32 qkey = 0;
2702
2703         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2704                 return;
2705
2706         /* adjust qkey in qp context */
2707         context->qkey = cpu_to_be32(qkey);
2708 }
2709
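     /* RST2INIT pins everything the QP context references: the wrapper
      * validates the MTT range and takes references on it, on the receive
      * and send CQs and, when a SRQ is used, on the SRQ, before committing
      * the QP's move to RES_QP_HW.
      */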
2710 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2711                              struct mlx4_vhcr *vhcr,
2712                              struct mlx4_cmd_mailbox *inbox,
2713                              struct mlx4_cmd_mailbox *outbox,
2714                              struct mlx4_cmd_info *cmd)
2715 {
2716         int err;
2717         int qpn = vhcr->in_modifier & 0x7fffff;
2718         struct res_mtt *mtt;
2719         struct res_qp *qp;
2720         struct mlx4_qp_context *qpc = inbox->buf + 8;
2721         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2722         int mtt_size = qp_get_mtt_size(qpc);
2723         struct res_cq *rcq;
2724         struct res_cq *scq;
2725         int rcqn = qp_get_rcqn(qpc);
2726         int scqn = qp_get_scqn(qpc);
2727         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2728         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2729         struct res_srq *srq;
2730         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2731
2732         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2733         if (err)
2734                 return err;
2735         qp->local_qpn = local_qpn;
2736         qp->sched_queue = 0;
2737         qp->param3 = 0;
2738         qp->vlan_control = 0;
2739         qp->fvl_rx = 0;
2740         qp->pri_path_fl = 0;
2741         qp->vlan_index = 0;
2742         qp->feup = 0;
2743         qp->qpc_flags = be32_to_cpu(qpc->flags);
2744
2745         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2746         if (err)
2747                 goto ex_abort;
2748
2749         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2750         if (err)
2751                 goto ex_put_mtt;
2752
2753         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2754         if (err)
2755                 goto ex_put_mtt;
2756
2757         if (scqn != rcqn) {
2758                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2759                 if (err)
2760                         goto ex_put_rcq;
2761         } else {
2762                 scq = rcq;
             }
2763
2764         if (use_srq) {
2765                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2766                 if (err)
2767                         goto ex_put_scq;
2768         }
2769
2770         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2771         update_pkey_index(dev, slave, inbox);
2772         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2773         if (err)
2774                 goto ex_put_srq;
2775         atomic_inc(&mtt->ref_count);
2776         qp->mtt = mtt;
2777         atomic_inc(&rcq->ref_count);
2778         qp->rcq = rcq;
2779         atomic_inc(&scq->ref_count);
2780         qp->scq = scq;
2781
2782         if (scqn != rcqn)
2783                 put_res(dev, slave, scqn, RES_CQ);
2784
2785         if (use_srq) {
2786                 atomic_inc(&srq->ref_count);
2787                 put_res(dev, slave, srqn, RES_SRQ);
2788                 qp->srq = srq;
2789         }
2790         put_res(dev, slave, rcqn, RES_CQ);
2791         put_res(dev, slave, mtt_base, RES_MTT);
2792         res_end_move(dev, slave, RES_QP, qpn);
2793
2794         return 0;
2795
2796 ex_put_srq:
2797         if (use_srq)
2798                 put_res(dev, slave, srqn, RES_SRQ);
2799 ex_put_scq:
2800         if (scqn != rcqn)
2801                 put_res(dev, slave, scqn, RES_CQ);
2802 ex_put_rcq:
2803         put_res(dev, slave, rcqn, RES_CQ);
2804 ex_put_mtt:
2805         put_res(dev, slave, mtt_base, RES_MTT);
2806 ex_abort:
2807         res_abort_move(dev, slave, RES_QP, qpn);
2808
2809         return err;
2810 }
2811
2812 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2813 {
2814         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2815 }
2816
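     /* An EQE is 32 bytes, so log_eq_size + 5 is the log2 of the queue
      * size in bytes; at least one MTT page is always needed.
      */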
2817 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2818 {
2819         int log_eq_size = eqc->log_eq_size & 0x1f;
2820         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2821
2822         if (log_eq_size + 5 < page_shift)
2823                 return 1;
2824
2825         return 1 << (log_eq_size + 5 - page_shift);
2826 }
2827
2828 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2829 {
2830         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2831 }
2832
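     /* Same computation for CQs, with a 32-byte basic CQE. */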
2833 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2834 {
2835         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2836         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2837
2838         if (log_cq_size + 5 < page_shift)
2839                 return 1;
2840
2841         return 1 << (log_cq_size + 5 - page_shift);
2842 }
2843
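     /* EQ numbers are per-slave, so the tracker id for an EQ embeds the
      * slave: res_id = (slave << 8) | eqn, mirrored in mlx4_HW2SW_EQ_wrapper.
      */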
2844 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2845                           struct mlx4_vhcr *vhcr,
2846                           struct mlx4_cmd_mailbox *inbox,
2847                           struct mlx4_cmd_mailbox *outbox,
2848                           struct mlx4_cmd_info *cmd)
2849 {
2850         int err;
2851         int eqn = vhcr->in_modifier;
2852         int res_id = (slave << 8) | eqn;
2853         struct mlx4_eq_context *eqc = inbox->buf;
2854         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2855         int mtt_size = eq_get_mtt_size(eqc);
2856         struct res_eq *eq;
2857         struct res_mtt *mtt;
2858
2859         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2860         if (err)
2861                 return err;
2862         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2863         if (err)
2864                 goto out_add;
2865
2866         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2867         if (err)
2868                 goto out_move;
2869
2870         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2871         if (err)
2872                 goto out_put;
2873
2874         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2875         if (err)
2876                 goto out_put;
2877
2878         atomic_inc(&mtt->ref_count);
2879         eq->mtt = mtt;
2880         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2881         res_end_move(dev, slave, RES_EQ, res_id);
2882         return 0;
2883
2884 out_put:
2885         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2886 out_move:
2887         res_abort_move(dev, slave, RES_EQ, res_id);
2888 out_add:
2889         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2890         return err;
2891 }
2892
2893 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2894                             struct mlx4_vhcr *vhcr,
2895                             struct mlx4_cmd_mailbox *inbox,
2896                             struct mlx4_cmd_mailbox *outbox,
2897                             struct mlx4_cmd_info *cmd)
2898 {
2899         int err;
2900         u8 get = vhcr->op_modifier;
2901
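             /* slaves may only issue the "get" flavor (op_modifier == 1) */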
2902         if (get != 1)
2903                 return -EPERM;
2904
2905         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2906
2907         return err;
2908 }
2909
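     /* Find the slave's MTT reservation that covers [start, start + len)
      * and mark it busy; the caller drops it again with put_res().
      */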
2910 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2911                               int len, struct res_mtt **res)
2912 {
2913         struct mlx4_priv *priv = mlx4_priv(dev);
2914         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2915         struct res_mtt *mtt;
2916         int err = -EINVAL;
2917
2918         spin_lock_irq(mlx4_tlock(dev));
2919         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2920                             com.list) {
2921                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2922                         *res = mtt;
2923                         mtt->com.from_state = mtt->com.state;
2924                         mtt->com.state = RES_MTT_BUSY;
2925                         err = 0;
2926                         break;
2927                 }
2928         }
2929         spin_unlock_irq(mlx4_tlock(dev));
2930
2931         return err;
2932 }
2933
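     /* Sanity-check a QP state-transition mailbox issued by a slave:
      * strip the FPP bit for non-master functions, keep MGID indexes
      * within the slave's GID quota, and let only SMI-enabled VFs bring
      * up MLX proxy special QPs.
      */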
2934 static int verify_qp_parameters(struct mlx4_dev *dev,
2935                                 struct mlx4_vhcr *vhcr,
2936                                 struct mlx4_cmd_mailbox *inbox,
2937                                 enum qp_transition transition, u8 slave)
2938 {
2939         u32                     qp_type;
2940         u32                     qpn;
2941         struct mlx4_qp_context  *qp_ctx;
2942         enum mlx4_qp_optpar     optpar;
2943         int port;
2944         int num_gids;
2945
2946         qp_ctx  = inbox->buf + 8;
2947         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2948         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2949
2950         if (slave != mlx4_master_func_num(dev))
2951                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2952
2953         switch (qp_type) {
2954         case MLX4_QP_ST_RC:
2955         case MLX4_QP_ST_XRC:
2956         case MLX4_QP_ST_UC:
2957                 switch (transition) {
2958                 case QP_TRANS_INIT2RTR:
2959                 case QP_TRANS_RTR2RTS:
2960                 case QP_TRANS_RTS2RTS:
2961                 case QP_TRANS_SQD2SQD:
2962                 case QP_TRANS_SQD2RTS:
2963                         if (slave != mlx4_master_func_num(dev)) {
2964                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2965                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2966                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2967                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2968                                         else
2969                                                 num_gids = 1;
2970                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2971                                                 return -EINVAL;
2972                                 }
2973                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2974                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2975                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2976                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2977                                         else
2978                                                 num_gids = 1;
2979                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2980                                                 return -EINVAL;
2981                                 }
                             }
2982                         break;
2983                 default:
2984                         break;
2985                 }
2986                 break;
2987
2988         case MLX4_QP_ST_MLX:
2989                 qpn = vhcr->in_modifier & 0x7fffff;
2990                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2991                 if (transition == QP_TRANS_INIT2RTR &&
2992                     slave != mlx4_master_func_num(dev) &&
2993                     mlx4_is_qp_reserved(dev, qpn) &&
2994                     !mlx4_vf_smi_enabled(dev, slave, port)) {
2995                         /* only enabled VFs may create MLX proxy QPs */
2996                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2997                                  __func__, slave, port);
2998                         return -EPERM;
2999                 }
3000                 break;
3001
3002         default:
3003                 break;
3004         }
3005
3006         return 0;
3007 }
3008
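/*
 * WRITE_MTT paravirtualization: check that the MTT range named in the
 * mailbox belongs to this slave, then write the entries through the
 * software path (__mlx4_write_mtt) after converting them to host
 * byte order.
 */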
3009 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3010                            struct mlx4_vhcr *vhcr,
3011                            struct mlx4_cmd_mailbox *inbox,
3012                            struct mlx4_cmd_mailbox *outbox,
3013                            struct mlx4_cmd_info *cmd)
3014 {
3015         struct mlx4_mtt mtt;
3016         __be64 *page_list = inbox->buf;
3017         u64 *pg_list = (u64 *)page_list;
3018         int i;
3019         struct res_mtt *rmtt = NULL;
3020         int start = be64_to_cpu(page_list[0]);
3021         int npages = vhcr->in_modifier;
3022         int err;
3023
3024         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3025         if (err)
3026                 return err;
3027
3028         /* Call the SW implementation of write_mtt:
3029          * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD: this is broken, but left as-is since the
			    offset isn't actually used here */
3033         mtt.order = 0;
3034         mtt.page_shift = 0;
3035         for (i = 0; i < npages; ++i)
3036                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3037
3038         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3039                                ((u64 *)page_list + 2));
3040
3041         if (rmtt)
3042                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3043
3044         return err;
3045 }
3046
3047 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3048                           struct mlx4_vhcr *vhcr,
3049                           struct mlx4_cmd_mailbox *inbox,
3050                           struct mlx4_cmd_mailbox *outbox,
3051                           struct mlx4_cmd_info *cmd)
3052 {
3053         int eqn = vhcr->in_modifier;
3054         int res_id = eqn | (slave << 8);
3055         struct res_eq *eq;
3056         int err;
3057
3058         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3059         if (err)
3060                 return err;
3061
3062         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3063         if (err)
3064                 goto ex_abort;
3065
3066         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3067         if (err)
3068                 goto ex_put;
3069
3070         atomic_dec(&eq->mtt->ref_count);
3071         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3072         res_end_move(dev, slave, RES_EQ, res_id);
3073         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3074
3075         return 0;
3076
3077 ex_put:
3078         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3079 ex_abort:
3080         res_abort_move(dev, slave, RES_EQ, res_id);
3081
3082         return err;
3083 }
3084
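/*
 * Forward an asynchronous event (EQE) to a slave by injecting it into
 * the event EQ the slave registered for this event type.  The
 * in_modifier packs the slave number into bits 0..7 and the target EQN
 * into bits 16..23; the tracker's EQ resource id is (slave << 8) | eqn.
 */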
3085 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3086 {
3087         struct mlx4_priv *priv = mlx4_priv(dev);
3088         struct mlx4_slave_event_eq_info *event_eq;
3089         struct mlx4_cmd_mailbox *mailbox;
3090         u32 in_modifier = 0;
3091         int err;
3092         int res_id;
3093         struct res_eq *req;
3094
3095         if (!priv->mfunc.master.slave_state)
3096                 return -EINVAL;
3097
3098         /* check for slave valid, slave not PF, and slave active */
3099         if (slave < 0 || slave > dev->persist->num_vfs ||
3100             slave == dev->caps.function ||
3101             !priv->mfunc.master.slave_state[slave].active)
3102                 return 0;
3103
3104         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3105
3106         /* Create the event only if the slave is registered */
3107         if (event_eq->eqn < 0)
3108                 return 0;
3109
3110         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3111         res_id = (slave << 8) | event_eq->eqn;
3112         err = get_res(dev, slave, res_id, RES_EQ, &req);
3113         if (err)
3114                 goto unlock;
3115
3116         if (req->com.from_state != RES_EQ_HW) {
3117                 err = -EINVAL;
3118                 goto put;
3119         }
3120
3121         mailbox = mlx4_alloc_cmd_mailbox(dev);
3122         if (IS_ERR(mailbox)) {
3123                 err = PTR_ERR(mailbox);
3124                 goto put;
3125         }
3126
3127         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3128                 ++event_eq->token;
3129                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3130         }
3131
3132         memcpy(mailbox->buf, (u8 *) eqe, 28);
3133
3134         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3135
3136         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3137                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3138                        MLX4_CMD_NATIVE);
3139
3140         put_res(dev, slave, res_id, RES_EQ);
3141         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3142         mlx4_free_cmd_mailbox(dev, mailbox);
3143         return err;
3144
3145 put:
3146         put_res(dev, slave, res_id, RES_EQ);
3147
3148 unlock:
3149         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3150         return err;
3151 }
3152
3153 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3154                           struct mlx4_vhcr *vhcr,
3155                           struct mlx4_cmd_mailbox *inbox,
3156                           struct mlx4_cmd_mailbox *outbox,
3157                           struct mlx4_cmd_info *cmd)
3158 {
3159         int eqn = vhcr->in_modifier;
3160         int res_id = eqn | (slave << 8);
3161         struct res_eq *eq;
3162         int err;
3163
3164         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3165         if (err)
3166                 return err;
3167
3168         if (eq->com.from_state != RES_EQ_HW) {
3169                 err = -EINVAL;
3170                 goto ex_put;
3171         }
3172
3173         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3174
3175 ex_put:
3176         put_res(dev, slave, res_id, RES_EQ);
3177         return err;
3178 }
3179
3180 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3181                           struct mlx4_vhcr *vhcr,
3182                           struct mlx4_cmd_mailbox *inbox,
3183                           struct mlx4_cmd_mailbox *outbox,
3184                           struct mlx4_cmd_info *cmd)
3185 {
3186         int err;
3187         int cqn = vhcr->in_modifier;
3188         struct mlx4_cq_context *cqc = inbox->buf;
3189         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3190         struct res_cq *cq;
3191         struct res_mtt *mtt;
3192
3193         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3194         if (err)
3195                 return err;
3196         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3197         if (err)
3198                 goto out_move;
3199         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3200         if (err)
3201                 goto out_put;
3202         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3203         if (err)
3204                 goto out_put;
3205         atomic_inc(&mtt->ref_count);
3206         cq->mtt = mtt;
3207         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3208         res_end_move(dev, slave, RES_CQ, cqn);
3209         return 0;
3210
3211 out_put:
3212         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3213 out_move:
3214         res_abort_move(dev, slave, RES_CQ, cqn);
3215         return err;
3216 }
3217
3218 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3219                           struct mlx4_vhcr *vhcr,
3220                           struct mlx4_cmd_mailbox *inbox,
3221                           struct mlx4_cmd_mailbox *outbox,
3222                           struct mlx4_cmd_info *cmd)
3223 {
3224         int err;
3225         int cqn = vhcr->in_modifier;
3226         struct res_cq *cq;
3227
3228         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3229         if (err)
3230                 return err;
3231         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3232         if (err)
3233                 goto out_move;
3234         atomic_dec(&cq->mtt->ref_count);
3235         res_end_move(dev, slave, RES_CQ, cqn);
3236         return 0;
3237
3238 out_move:
3239         res_abort_move(dev, slave, RES_CQ, cqn);
3240         return err;
3241 }
3242
3243 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3244                           struct mlx4_vhcr *vhcr,
3245                           struct mlx4_cmd_mailbox *inbox,
3246                           struct mlx4_cmd_mailbox *outbox,
3247                           struct mlx4_cmd_info *cmd)
3248 {
3249         int cqn = vhcr->in_modifier;
3250         struct res_cq *cq;
3251         int err;
3252
3253         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3254         if (err)
3255                 return err;
3256
3257         if (cq->com.from_state != RES_CQ_HW)
3258                 goto ex_put;
3259
3260         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3261 ex_put:
3262         put_res(dev, slave, cqn, RES_CQ);
3263
3264         return err;
3265 }
3266
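/*
 * CQ resize (MODIFY_CQ with op_modifier 0): verify the CQ still points
 * at its original MTT, validate the new MTT range, execute the command,
 * and only then move the MTT reference from the old range to the new.
 */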
3267 static int handle_resize(struct mlx4_dev *dev, int slave,
3268                          struct mlx4_vhcr *vhcr,
3269                          struct mlx4_cmd_mailbox *inbox,
3270                          struct mlx4_cmd_mailbox *outbox,
3271                          struct mlx4_cmd_info *cmd,
3272                          struct res_cq *cq)
3273 {
3274         int err;
3275         struct res_mtt *orig_mtt;
3276         struct res_mtt *mtt;
3277         struct mlx4_cq_context *cqc = inbox->buf;
3278         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3279
3280         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3281         if (err)
3282                 return err;
3283
3284         if (orig_mtt != cq->mtt) {
3285                 err = -EINVAL;
3286                 goto ex_put;
3287         }
3288
3289         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3290         if (err)
3291                 goto ex_put;
3292
3293         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3294         if (err)
3295                 goto ex_put1;
3296         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3297         if (err)
3298                 goto ex_put1;
3299         atomic_dec(&orig_mtt->ref_count);
3300         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3301         atomic_inc(&mtt->ref_count);
3302         cq->mtt = mtt;
3303         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3304         return 0;
3305
3306 ex_put1:
3307         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3308 ex_put:
3309         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3310
3311         return err;
3313 }
3314
3315 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3316                            struct mlx4_vhcr *vhcr,
3317                            struct mlx4_cmd_mailbox *inbox,
3318                            struct mlx4_cmd_mailbox *outbox,
3319                            struct mlx4_cmd_info *cmd)
3320 {
3321         int cqn = vhcr->in_modifier;
3322         struct res_cq *cq;
3323         int err;
3324
3325         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3326         if (err)
3327                 return err;
3328
3329         if (cq->com.from_state != RES_CQ_HW)
3330                 goto ex_put;
3331
3332         if (vhcr->op_modifier == 0) {
3333                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3334                 goto ex_put;
3335         }
3336
3337         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3338 ex_put:
3339         put_res(dev, slave, cqn, RES_CQ);
3340
3341         return err;
3342 }
3343
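/*
 * Number of MTT entries spanned by the SRQ buffer, i.e.
 * 1 << (log_srq_size + log_rq_stride + 4 - page_shift) pages, with a
 * minimum of one.  For example, log_srq_size = 10, log_rq_stride = 2
 * and 4KB pages (page_shift = 12) give 1 << 4 = 16 entries.
 */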
3344 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3345 {
3346         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3347         int log_rq_stride = srqc->logstride & 7;
3348         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3349
3350         if (log_srq_size + log_rq_stride + 4 < page_shift)
3351                 return 1;
3352
3353         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3354 }
3355
3356 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3357                            struct mlx4_vhcr *vhcr,
3358                            struct mlx4_cmd_mailbox *inbox,
3359                            struct mlx4_cmd_mailbox *outbox,
3360                            struct mlx4_cmd_info *cmd)
3361 {
3362         int err;
3363         int srqn = vhcr->in_modifier;
3364         struct res_mtt *mtt;
3365         struct res_srq *srq;
3366         struct mlx4_srq_context *srqc = inbox->buf;
3367         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3368
3369         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3370                 return -EINVAL;
3371
3372         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3373         if (err)
3374                 return err;
3375         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3376         if (err)
3377                 goto ex_abort;
3378         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3379                               mtt);
3380         if (err)
3381                 goto ex_put_mtt;
3382
3383         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3384         if (err)
3385                 goto ex_put_mtt;
3386
3387         atomic_inc(&mtt->ref_count);
3388         srq->mtt = mtt;
3389         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3390         res_end_move(dev, slave, RES_SRQ, srqn);
3391         return 0;
3392
3393 ex_put_mtt:
3394         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3395 ex_abort:
3396         res_abort_move(dev, slave, RES_SRQ, srqn);
3397
3398         return err;
3399 }
3400
3401 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3402                            struct mlx4_vhcr *vhcr,
3403                            struct mlx4_cmd_mailbox *inbox,
3404                            struct mlx4_cmd_mailbox *outbox,
3405                            struct mlx4_cmd_info *cmd)
3406 {
3407         int err;
3408         int srqn = vhcr->in_modifier;
3409         struct res_srq *srq;
3410
3411         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3412         if (err)
3413                 return err;
3414         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3415         if (err)
3416                 goto ex_abort;
3417         atomic_dec(&srq->mtt->ref_count);
3418         if (srq->cq)
3419                 atomic_dec(&srq->cq->ref_count);
3420         res_end_move(dev, slave, RES_SRQ, srqn);
3421
3422         return 0;
3423
3424 ex_abort:
3425         res_abort_move(dev, slave, RES_SRQ, srqn);
3426
3427         return err;
3428 }
3429
3430 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3431                            struct mlx4_vhcr *vhcr,
3432                            struct mlx4_cmd_mailbox *inbox,
3433                            struct mlx4_cmd_mailbox *outbox,
3434                            struct mlx4_cmd_info *cmd)
3435 {
3436         int err;
3437         int srqn = vhcr->in_modifier;
3438         struct res_srq *srq;
3439
3440         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3441         if (err)
3442                 return err;
3443         if (srq->com.from_state != RES_SRQ_HW) {
3444                 err = -EBUSY;
3445                 goto out;
3446         }
3447         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3448 out:
3449         put_res(dev, slave, srqn, RES_SRQ);
3450         return err;
3451 }
3452
3453 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3454                          struct mlx4_vhcr *vhcr,
3455                          struct mlx4_cmd_mailbox *inbox,
3456                          struct mlx4_cmd_mailbox *outbox,
3457                          struct mlx4_cmd_info *cmd)
3458 {
3459         int err;
3460         int srqn = vhcr->in_modifier;
3461         struct res_srq *srq;
3462
3463         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3464         if (err)
3465                 return err;
3466
3467         if (srq->com.from_state != RES_SRQ_HW) {
3468                 err = -EBUSY;
3469                 goto out;
3470         }
3471
3472         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473 out:
3474         put_res(dev, slave, srqn, RES_SRQ);
3475         return err;
3476 }
3477
3478 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3479                         struct mlx4_vhcr *vhcr,
3480                         struct mlx4_cmd_mailbox *inbox,
3481                         struct mlx4_cmd_mailbox *outbox,
3482                         struct mlx4_cmd_info *cmd)
3483 {
3484         int err;
3485         int qpn = vhcr->in_modifier & 0x7fffff;
3486         struct res_qp *qp;
3487
3488         err = get_res(dev, slave, qpn, RES_QP, &qp);
3489         if (err)
3490                 return err;
3491         if (qp->com.from_state != RES_QP_HW) {
3492                 err = -EBUSY;
3493                 goto out;
3494         }
3495
3496         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3497 out:
3498         put_res(dev, slave, qpn, RES_QP);
3499         return err;
3500 }
3501
3502 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3503                               struct mlx4_vhcr *vhcr,
3504                               struct mlx4_cmd_mailbox *inbox,
3505                               struct mlx4_cmd_mailbox *outbox,
3506                               struct mlx4_cmd_info *cmd)
3507 {
	struct mlx4_qp_context *context = inbox->buf + 8;

	adjust_proxy_tun_qkey(dev, vhcr, context);
3510         update_pkey_index(dev, slave, inbox);
3511         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3512 }
3513
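/*
 * Translate the port bit (bit 6) of the sched_queue fields from the
 * slave's port numbering to the real physical port.  The primary path
 * is rewritten when MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH is set (and
 * always on Ethernet ports); the alternate path only when
 * MLX4_QP_OPTPAR_ALT_ADDR_PATH is set.
 */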
3514 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3515                                   struct mlx4_qp_context *qpc,
3516                                   struct mlx4_cmd_mailbox *inbox)
3517 {
3518         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3519         u8 pri_sched_queue;
3520         int port = mlx4_slave_convert_port(
3521                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3522
3523         if (port < 0)
3524                 return -EINVAL;
3525
3526         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3527                           ((port & 1) << 6);
3528
	if ((optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) ||
	    mlx4_is_eth(dev, port + 1))
		qpc->pri_path.sched_queue = pri_sched_queue;
3533
3534         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3535                 port = mlx4_slave_convert_port(
3536                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3537                                 + 1) - 1;
3538                 if (port < 0)
3539                         return -EINVAL;
3540                 qpc->alt_path.sched_queue =
3541                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3542                         (port & 1) << 6;
3543         }
3544         return 0;
3545 }
3546
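/*
 * On Ethernet (RoCE) ports, a non-MLX QP must use a source-MAC index
 * that resolves to a MAC registered to this slave; otherwise the
 * transition is rejected with -ENOENT.
 */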
3547 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3548                                 struct mlx4_qp_context *qpc,
3549                                 struct mlx4_cmd_mailbox *inbox)
3550 {
3551         u64 mac;
3552         int port;
3553         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3554         u8 sched = *(u8 *)(inbox->buf + 64);
3555         u8 smac_ix;
3556
3557         port = (sched >> 6 & 1) + 1;
3558         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3559                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3560                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3561                         return -ENOENT;
3562         }
3563         return 0;
3564 }
3565
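/*
 * INIT2RTR is where most per-VF policy is applied: the sched_queue
 * port bits, the QP transition parameters, the RoCE source MAC, the
 * pkey and GID indexes and the vport VLAN/QoS settings are all fixed
 * up before the command is passed down to the firmware.
 */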
3566 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3567                              struct mlx4_vhcr *vhcr,
3568                              struct mlx4_cmd_mailbox *inbox,
3569                              struct mlx4_cmd_mailbox *outbox,
3570                              struct mlx4_cmd_info *cmd)
3571 {
3572         int err;
3573         struct mlx4_qp_context *qpc = inbox->buf + 8;
3574         int qpn = vhcr->in_modifier & 0x7fffff;
3575         struct res_qp *qp;
3576         u8 orig_sched_queue;
3577         __be32  orig_param3 = qpc->param3;
3578         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3579         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3580         u8 orig_pri_path_fl = qpc->pri_path.fl;
3581         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3582         u8 orig_feup = qpc->pri_path.feup;
3583
3584         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3585         if (err)
3586                 return err;
3587         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3588         if (err)
3589                 return err;
3590
3591         if (roce_verify_mac(dev, slave, qpc, inbox))
3592                 return -EINVAL;
3593
3594         update_pkey_index(dev, slave, inbox);
3595         update_gid(dev, inbox, (u8)slave);
3596         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3597         orig_sched_queue = qpc->pri_path.sched_queue;
3598         err = update_vport_qp_param(dev, inbox, slave, qpn);
3599         if (err)
3600                 return err;
3601
3602         err = get_res(dev, slave, qpn, RES_QP, &qp);
3603         if (err)
3604                 return err;
3605         if (qp->com.from_state != RES_QP_HW) {
3606                 err = -EBUSY;
3607                 goto out;
3608         }
3609
3610         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3611 out:
	/* if no error, save the sched_queue value passed in by the VF.
	 * This is essentially the QoS value provided by the VF, and will
	 * be useful if we allow dynamic changes from VST back to VGT
	 */
3616         if (!err) {
3617                 qp->sched_queue = orig_sched_queue;
3618                 qp->param3      = orig_param3;
3619                 qp->vlan_control = orig_vlan_control;
3620                 qp->fvl_rx      =  orig_fvl_rx;
3621                 qp->pri_path_fl = orig_pri_path_fl;
3622                 qp->vlan_index  = orig_vlan_index;
3623                 qp->feup        = orig_feup;
3624         }
3625         put_res(dev, slave, qpn, RES_QP);
3626         return err;
3627 }
3628
3629 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3630                             struct mlx4_vhcr *vhcr,
3631                             struct mlx4_cmd_mailbox *inbox,
3632                             struct mlx4_cmd_mailbox *outbox,
3633                             struct mlx4_cmd_info *cmd)
3634 {
3635         int err;
3636         struct mlx4_qp_context *context = inbox->buf + 8;
3637
3638         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3639         if (err)
3640                 return err;
3641         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3642         if (err)
3643                 return err;
3644
3645         update_pkey_index(dev, slave, inbox);
3646         update_gid(dev, inbox, (u8)slave);
3647         adjust_proxy_tun_qkey(dev, vhcr, context);
3648         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3649 }
3650
3651 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3652                             struct mlx4_vhcr *vhcr,
3653                             struct mlx4_cmd_mailbox *inbox,
3654                             struct mlx4_cmd_mailbox *outbox,
3655                             struct mlx4_cmd_info *cmd)
3656 {
3657         int err;
3658         struct mlx4_qp_context *context = inbox->buf + 8;
3659
3660         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3661         if (err)
3662                 return err;
3663         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3664         if (err)
3665                 return err;
3666
3667         update_pkey_index(dev, slave, inbox);
3668         update_gid(dev, inbox, (u8)slave);
3669         adjust_proxy_tun_qkey(dev, vhcr, context);
3670         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3671 }
3672
3674 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3675                               struct mlx4_vhcr *vhcr,
3676                               struct mlx4_cmd_mailbox *inbox,
3677                               struct mlx4_cmd_mailbox *outbox,
3678                               struct mlx4_cmd_info *cmd)
3679 {
3680         struct mlx4_qp_context *context = inbox->buf + 8;
3681         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3682         if (err)
3683                 return err;
3684         adjust_proxy_tun_qkey(dev, vhcr, context);
3685         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686 }
3687
3688 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3689                             struct mlx4_vhcr *vhcr,
3690                             struct mlx4_cmd_mailbox *inbox,
3691                             struct mlx4_cmd_mailbox *outbox,
3692                             struct mlx4_cmd_info *cmd)
3693 {
3694         int err;
3695         struct mlx4_qp_context *context = inbox->buf + 8;
3696
3697         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3698         if (err)
3699                 return err;
3700         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3701         if (err)
3702                 return err;
3703
3704         adjust_proxy_tun_qkey(dev, vhcr, context);
3705         update_gid(dev, inbox, (u8)slave);
3706         update_pkey_index(dev, slave, inbox);
3707         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3708 }
3709
3710 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3711                             struct mlx4_vhcr *vhcr,
3712                             struct mlx4_cmd_mailbox *inbox,
3713                             struct mlx4_cmd_mailbox *outbox,
3714                             struct mlx4_cmd_info *cmd)
3715 {
3716         int err;
3717         struct mlx4_qp_context *context = inbox->buf + 8;
3718
3719         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3720         if (err)
3721                 return err;
3722         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3723         if (err)
3724                 return err;
3725
3726         adjust_proxy_tun_qkey(dev, vhcr, context);
3727         update_gid(dev, inbox, (u8)slave);
3728         update_pkey_index(dev, slave, inbox);
3729         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3730 }
3731
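/*
 * Move a QP back to RESET.  On success the QP no longer references its
 * MTT, CQs or SRQ, so drop the reference counts taken when it was
 * moved to hardware ownership.
 */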
3732 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3733                          struct mlx4_vhcr *vhcr,
3734                          struct mlx4_cmd_mailbox *inbox,
3735                          struct mlx4_cmd_mailbox *outbox,
3736                          struct mlx4_cmd_info *cmd)
3737 {
3738         int err;
3739         int qpn = vhcr->in_modifier & 0x7fffff;
3740         struct res_qp *qp;
3741
3742         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3743         if (err)
3744                 return err;
3745         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3746         if (err)
3747                 goto ex_abort;
3748
3749         atomic_dec(&qp->mtt->ref_count);
3750         atomic_dec(&qp->rcq->ref_count);
3751         atomic_dec(&qp->scq->ref_count);
3752         if (qp->srq)
3753                 atomic_dec(&qp->srq->ref_count);
3754         res_end_move(dev, slave, RES_QP, qpn);
3755         return 0;
3756
3757 ex_abort:
3758         res_abort_move(dev, slave, RES_QP, qpn);
3759
3760         return err;
3761 }
3762
3763 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3764                                 struct res_qp *rqp, u8 *gid)
3765 {
3766         struct res_gid *res;
3767
3768         list_for_each_entry(res, &rqp->mcg_list, list) {
3769                 if (!memcmp(res->gid, gid, 16))
3770                         return res;
3771         }
3772         return NULL;
3773 }
3774
3775 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3776                        u8 *gid, enum mlx4_protocol prot,
3777                        enum mlx4_steer_type steer, u64 reg_id)
3778 {
3779         struct res_gid *res;
3780         int err;
3781
	res = kzalloc(sizeof(*res), GFP_KERNEL);
3783         if (!res)
3784                 return -ENOMEM;
3785
3786         spin_lock_irq(&rqp->mcg_spl);
3787         if (find_gid(dev, slave, rqp, gid)) {
3788                 kfree(res);
3789                 err = -EEXIST;
3790         } else {
3791                 memcpy(res->gid, gid, 16);
3792                 res->prot = prot;
3793                 res->steer = steer;
3794                 res->reg_id = reg_id;
3795                 list_add_tail(&res->list, &rqp->mcg_list);
3796                 err = 0;
3797         }
3798         spin_unlock_irq(&rqp->mcg_spl);
3799
3800         return err;
3801 }
3802
3803 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3804                        u8 *gid, enum mlx4_protocol prot,
3805                        enum mlx4_steer_type steer, u64 *reg_id)
3806 {
3807         struct res_gid *res;
3808         int err;
3809
3810         spin_lock_irq(&rqp->mcg_spl);
3811         res = find_gid(dev, slave, rqp, gid);
3812         if (!res || res->prot != prot || res->steer != steer)
3813                 err = -EINVAL;
3814         else {
3815                 *reg_id = res->reg_id;
3816                 list_del(&res->list);
3817                 kfree(res);
3818                 err = 0;
3819         }
3820         spin_unlock_irq(&rqp->mcg_spl);
3821
3822         return err;
3823 }
3824
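/*
 * Attach a QP to a multicast/steering group, dispatching on the
 * device's steering mode.  The port carried in gid[5] is translated
 * from the slave's numbering to the real port (under B0 steering, only
 * Ethernet GIDs carry a port there).
 */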
3825 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3826                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3827                      enum mlx4_steer_type type, u64 *reg_id)
3828 {
3829         switch (dev->caps.steering_mode) {
3830         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3831                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3832                 if (port < 0)
3833                         return port;
3834                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3835                                                 block_loopback, prot,
3836                                                 reg_id);
3837         }
3838         case MLX4_STEERING_MODE_B0:
3839                 if (prot == MLX4_PROT_ETH) {
3840                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3841                         if (port < 0)
3842                                 return port;
3843                         gid[5] = port;
3844                 }
3845                 return mlx4_qp_attach_common(dev, qp, gid,
3846                                             block_loopback, prot, type);
3847         default:
3848                 return -EINVAL;
3849         }
3850 }
3851
3852 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3853                      u8 gid[16], enum mlx4_protocol prot,
3854                      enum mlx4_steer_type type, u64 reg_id)
3855 {
3856         switch (dev->caps.steering_mode) {
3857         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3858                 return mlx4_flow_detach(dev, reg_id);
3859         case MLX4_STEERING_MODE_B0:
3860                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3861         default:
3862                 return -EINVAL;
3863         }
3864 }
3865
3866 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3867                             u8 *gid, enum mlx4_protocol prot)
3868 {
3869         int real_port;
3870
3871         if (prot != MLX4_PROT_ETH)
3872                 return 0;
3873
3874         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3875             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3876                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3877                 if (real_port < 0)
3878                         return -EINVAL;
3879                 gid[5] = real_port;
3880         }
3881
3882         return 0;
3883 }
3884
3885 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3886                                struct mlx4_vhcr *vhcr,
3887                                struct mlx4_cmd_mailbox *inbox,
3888                                struct mlx4_cmd_mailbox *outbox,
3889                                struct mlx4_cmd_info *cmd)
3890 {
3891         struct mlx4_qp qp; /* dummy for calling attach/detach */
3892         u8 *gid = inbox->buf;
3893         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3894         int err;
3895         int qpn;
3896         struct res_qp *rqp;
3897         u64 reg_id = 0;
3898         int attach = vhcr->op_modifier;
3899         int block_loopback = vhcr->in_modifier >> 31;
3900         u8 steer_type_mask = 2;
3901         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3902
3903         qpn = vhcr->in_modifier & 0xffffff;
3904         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3905         if (err)
3906                 return err;
3907
3908         qp.qpn = qpn;
3909         if (attach) {
3910                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3911                                 type, &reg_id);
3912                 if (err) {
			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3914                         goto ex_put;
3915                 }
3916                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3917                 if (err)
3918                         goto ex_detach;
3919         } else {
3920                 err = mlx4_adjust_port(dev, slave, gid, prot);
3921                 if (err)
3922                         goto ex_put;
3923
3924                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3925                 if (err)
3926                         goto ex_put;
3927
3928                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3929                 if (err)
			pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
			       qpn, reg_id);
3932         }
3933         put_res(dev, slave, qpn, RES_QP);
3934         return err;
3935
3936 ex_detach:
3937         qp_detach(dev, &qp, gid, prot, type, reg_id);
3938 ex_put:
3939         put_res(dev, slave, qpn, RES_QP);
3940         return err;
3941 }
3942
3943 /*
3944  * MAC validation for Flow Steering rules.
3945  * VF can attach rules only with a mac address which is assigned to it.
3946  */
3947 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3948                                    struct list_head *rlist)
3949 {
3950         struct mac_res *res, *tmp;
3951         __be64 be_mac;
3952
	/* make sure it isn't a multicast or broadcast MAC */
3954         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3955             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3956                 list_for_each_entry_safe(res, tmp, rlist, list) {
3957                         be_mac = cpu_to_be64(res->mac << 16);
3958                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3959                                 return 0;
3960                 }
		pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3962                        eth_header->eth.dst_mac, slave);
3963                 return -EINVAL;
3964         }
3965         return 0;
3966 }
3967
3968 /*
3969  * In case of missing eth header, append eth header with a MAC address
3970  * assigned to the VF.
3971  */
3972 static int add_eth_header(struct mlx4_dev *dev, int slave,
3973                           struct mlx4_cmd_mailbox *inbox,
3974                           struct list_head *rlist, int header_id)
3975 {
3976         struct mac_res *res, *tmp;
3977         u8 port;
3978         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3979         struct mlx4_net_trans_rule_hw_eth *eth_header;
3980         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3981         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3982         __be64 be_mac = 0;
3983         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3984
3985         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3986         port = ctrl->port;
3987         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3988
3989         /* Clear a space in the inbox for eth header */
3990         switch (header_id) {
3991         case MLX4_NET_TRANS_RULE_ID_IPV4:
3992                 ip_header =
3993                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3994                 memmove(ip_header, eth_header,
3995                         sizeof(*ip_header) + sizeof(*l4_header));
3996                 break;
3997         case MLX4_NET_TRANS_RULE_ID_TCP:
3998         case MLX4_NET_TRANS_RULE_ID_UDP:
3999                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4000                             (eth_header + 1);
4001                 memmove(l4_header, eth_header, sizeof(*l4_header));
4002                 break;
4003         default:
4004                 return -EINVAL;
4005         }
4006         list_for_each_entry_safe(res, tmp, rlist, list) {
4007                 if (port == res->port) {
4008                         be_mac = cpu_to_be64(res->mac << 16);
4009                         break;
4010                 }
4011         }
4012         if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4014                        port);
4015                 return -EINVAL;
4016         }
4017
4018         memset(eth_header, 0, sizeof(*eth_header));
4019         eth_header->size = sizeof(*eth_header) >> 2;
4020         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4021         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4022         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4023
4024         return 0;
4026 }
4027
4028 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
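/*
 * Only a primary-path MAC-index update is permitted from a slave (see
 * the mask above); any other UPDATE_QP field is rejected with -EPERM,
 * and the new MAC index must resolve to a MAC registered to the slave.
 */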
4029 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4030                            struct mlx4_vhcr *vhcr,
4031                            struct mlx4_cmd_mailbox *inbox,
4032                            struct mlx4_cmd_mailbox *outbox,
4033                            struct mlx4_cmd_info *cmd_info)
4034 {
4035         int err;
4036         u32 qpn = vhcr->in_modifier & 0xffffff;
4037         struct res_qp *rqp;
4038         u64 mac;
4039         unsigned port;
4040         u64 pri_addr_path_mask;
4041         struct mlx4_update_qp_context *cmd;
4042         int smac_index;
4043
4044         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4045
4046         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4047         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4048             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4049                 return -EPERM;
4050
4051         /* Just change the smac for the QP */
4052         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4053         if (err) {
4054                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4055                 return err;
4056         }
4057
4058         port = (rqp->sched_queue >> 6 & 1) + 1;
4059
4060         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4061                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4062                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4063                                                 smac_index, &mac);
4064
4065                 if (err) {
4066                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4067                                  qpn, smac_index);
4068                         goto err_mac;
4069                 }
4070         }
4071
4072         err = mlx4_cmd(dev, inbox->dma,
4073                        vhcr->in_modifier, 0,
4074                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4075                        MLX4_CMD_NATIVE);
	if (err)
		mlx4_err(dev, "Failed to update qp 0x%x, command failed\n", qpn);
4080
4081 err_mac:
4082         put_res(dev, slave, qpn, RES_QP);
4083         return err;
4084 }
4085
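/*
 * Attach a flow-steering rule on behalf of a slave (device-managed
 * steering only): translate the port, make sure the rule carries an
 * Ethernet header with a MAC the slave owns (synthesizing one for
 * L3/L4-only rules), execute the command, and register the returned
 * rule id with the tracker so it can be released later.
 */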
4086 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4087                                          struct mlx4_vhcr *vhcr,
4088                                          struct mlx4_cmd_mailbox *inbox,
4089                                          struct mlx4_cmd_mailbox *outbox,
4090                                          struct mlx4_cmd_info *cmd)
4091 {
4093         struct mlx4_priv *priv = mlx4_priv(dev);
4094         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4095         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4096         int err;
4097         int qpn;
4098         struct res_qp *rqp;
4099         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4100         struct _rule_hw  *rule_header;
4101         int header_id;
4102
4103         if (dev->caps.steering_mode !=
4104             MLX4_STEERING_MODE_DEVICE_MANAGED)
4105                 return -EOPNOTSUPP;
4106
4107         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4108         ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4109         if (ctrl->port <= 0)
4110                 return -EINVAL;
4111         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4112         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4113         if (err) {
4114                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4115                 return err;
4116         }
4117         rule_header = (struct _rule_hw *)(ctrl + 1);
4118         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4119
4120         switch (header_id) {
4121         case MLX4_NET_TRANS_RULE_ID_ETH:
4122                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4123                         err = -EINVAL;
4124                         goto err_put;
4125                 }
4126                 break;
4127         case MLX4_NET_TRANS_RULE_ID_IB:
4128                 break;
4129         case MLX4_NET_TRANS_RULE_ID_IPV4:
4130         case MLX4_NET_TRANS_RULE_ID_TCP:
4131         case MLX4_NET_TRANS_RULE_ID_UDP:
4132                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4133                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4134                         err = -EINVAL;
4135                         goto err_put;
4136                 }
4137                 vhcr->in_modifier +=
4138                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4139                 break;
4140         default:
4141                 pr_err("Corrupted mailbox\n");
4142                 err = -EINVAL;
4143                 goto err_put;
4144         }
4145
4146         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4147                            vhcr->in_modifier, 0,
4148                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4149                            MLX4_CMD_NATIVE);
4150         if (err)
4151                 goto err_put;
4152
4153         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4154         if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
4157                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4158                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4159                          MLX4_CMD_NATIVE);
4160                 goto err_put;
4161         }
4162         atomic_inc(&rqp->ref_count);
4163 err_put:
4164         put_res(dev, slave, qpn, RES_QP);
4165         return err;
4166 }
4167
4168 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4169                                          struct mlx4_vhcr *vhcr,
4170                                          struct mlx4_cmd_mailbox *inbox,
4171                                          struct mlx4_cmd_mailbox *outbox,
4172                                          struct mlx4_cmd_info *cmd)
4173 {
4174         int err;
4175         struct res_qp *rqp;
4176         struct res_fs_rule *rrule;
4177
4178         if (dev->caps.steering_mode !=
4179             MLX4_STEERING_MODE_DEVICE_MANAGED)
4180                 return -EOPNOTSUPP;
4181
4182         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4183         if (err)
4184                 return err;
	/* Release the rule from busy state before removal */
4186         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4187         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4188         if (err)
4189                 return err;
4190
4191         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4192         if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
4194                 goto out;
4195         }
4196
4197         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4198                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4199                        MLX4_CMD_NATIVE);
4200         if (!err)
4201                 atomic_dec(&rqp->ref_count);
4202 out:
4203         put_res(dev, slave, rrule->qpn, RES_QP);
4204         return err;
4205 }
4206
4207 enum {
4208         BUSY_MAX_RETRIES = 10
4209 };
4210
4211 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4212                                struct mlx4_vhcr *vhcr,
4213                                struct mlx4_cmd_mailbox *inbox,
4214                                struct mlx4_cmd_mailbox *outbox,
4215                                struct mlx4_cmd_info *cmd)
4216 {
4217         int err;
4218         int index = vhcr->in_modifier & 0xffff;
4219
4220         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4221         if (err)
4222                 return err;
4223
4224         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4225         put_res(dev, slave, index, RES_COUNTER);
4226         return err;
4227 }
4228
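/*
 * Drop every multicast attachment still recorded for a QP; called when
 * cleaning up the resources of a departed slave.
 */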
4229 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4230 {
4231         struct res_gid *rgid;
4232         struct res_gid *tmp;
4233         struct mlx4_qp qp; /* dummy for calling attach/detach */
4234
4235         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4236                 switch (dev->caps.steering_mode) {
4237                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4238                         mlx4_flow_detach(dev, rgid->reg_id);
4239                         break;
4240                 case MLX4_STEERING_MODE_B0:
4241                         qp.qpn = rqp->local_qpn;
4242                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4243                                                      rgid->prot, rgid->steer);
4244                         break;
4245                 }
4246                 list_del(&rgid->list);
4247                 kfree(rgid);
4248         }
4249 }
4250
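/*
 * Mark all of a slave's resources of the given type as busy and flag
 * them for removal.  Returns the number of resources that were already
 * busy elsewhere and could not be claimed; move_all_busy() below
 * retries this for up to five seconds.
 */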
4251 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4252                           enum mlx4_resource type, int print)
4253 {
4254         struct mlx4_priv *priv = mlx4_priv(dev);
4255         struct mlx4_resource_tracker *tracker =
4256                 &priv->mfunc.master.res_tracker;
4257         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4258         struct res_common *r;
4259         struct res_common *tmp;
4260         int busy;
4261
4262         busy = 0;
4263         spin_lock_irq(mlx4_tlock(dev));
4264         list_for_each_entry_safe(r, tmp, rlist, list) {
4265                 if (r->owner == slave) {
4266                         if (!r->removing) {
4267                                 if (r->state == RES_ANY_BUSY) {
4268                                         if (print)
4269                                                 mlx4_dbg(dev,
4270                                                          "%s id 0x%llx is busy\n",
4271                                                           resource_str(type),
4272                                                           r->res_id);
4273                                         ++busy;
4274                                 } else {
4275                                         r->from_state = r->state;
4276                                         r->state = RES_ANY_BUSY;
4277                                         r->removing = 1;
4278                                 }
4279                         }
4280                 }
4281         }
4282         spin_unlock_irq(mlx4_tlock(dev));
4283
4284         return busy;
4285 }
4286
4287 static int move_all_busy(struct mlx4_dev *dev, int slave,
4288                          enum mlx4_resource type)
4289 {
4290         unsigned long begin;
4291         int busy;
4292
4293         begin = jiffies;
4294         do {
4295                 busy = _move_all_busy(dev, slave, type, 0);
4296                 if (time_after(jiffies, begin + 5 * HZ))
4297                         break;
4298                 if (busy)
4299                         cond_resched();
4300         } while (busy);
4301
4302         if (busy)
4303                 busy = _move_all_busy(dev, slave, type, 1);
4304
4305         return busy;
4306 }
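
/*
 * Force-release every QP still owned by the slave: detach its
 * multicast groups and walk each QP back down its state ladder
 * (HW -> MAPPED -> RESERVED), issuing 2RST_QP and freeing ICM and the
 * QP number range as needed.
 */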
4307 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4308 {
4309         struct mlx4_priv *priv = mlx4_priv(dev);
4310         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4311         struct list_head *qp_list =
4312                 &tracker->slave_list[slave].res_list[RES_QP];
4313         struct res_qp *qp;
4314         struct res_qp *tmp;
4315         int state;
4316         u64 in_param;
4317         int qpn;
4318         int err;
4319
4320         err = move_all_busy(dev, slave, RES_QP);
4321         if (err)
4322                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4323                           slave);
4324
4325         spin_lock_irq(mlx4_tlock(dev));
4326         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4327                 spin_unlock_irq(mlx4_tlock(dev));
4328                 if (qp->com.owner == slave) {
4329                         qpn = qp->com.res_id;
4330                         detach_qp(dev, slave, qp);
4331                         state = qp->com.from_state;
4332                         while (state != 0) {
4333                                 switch (state) {
4334                                 case RES_QP_RESERVED:
4335                                         spin_lock_irq(mlx4_tlock(dev));
4336                                         rb_erase(&qp->com.node,
4337                                                  &tracker->res_tree[RES_QP]);
4338                                         list_del(&qp->com.list);
4339                                         spin_unlock_irq(mlx4_tlock(dev));
4340                                         if (!valid_reserved(dev, slave, qpn)) {
4341                                                 __mlx4_qp_release_range(dev, qpn, 1);
4342                                                 mlx4_release_resource(dev, slave,
4343                                                                       RES_QP, 1, 0);
4344                                         }
4345                                         kfree(qp);
4346                                         state = 0;
4347                                         break;
4348                                 case RES_QP_MAPPED:
4349                                         if (!valid_reserved(dev, slave, qpn))
4350                                                 __mlx4_qp_free_icm(dev, qpn);
4351                                         state = RES_QP_RESERVED;
4352                                         break;
4353                                 case RES_QP_HW:
4354                                         in_param = slave;
4355                                         err = mlx4_cmd(dev, in_param,
4356                                                        qp->local_qpn, 2,
4357                                                        MLX4_CMD_2RST_QP,
4358                                                        MLX4_CMD_TIME_CLASS_A,
4359                                                        MLX4_CMD_NATIVE);
4360                                         if (err)
4361                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4362                                                          slave, qp->local_qpn);
4363                                         atomic_dec(&qp->rcq->ref_count);
4364                                         atomic_dec(&qp->scq->ref_count);
4365                                         atomic_dec(&qp->mtt->ref_count);
4366                                         if (qp->srq)
4367                                                 atomic_dec(&qp->srq->ref_count);
4368                                         state = RES_QP_MAPPED;
4369                                         break;
4370                                 default:
4371                                         state = 0;
4372                                 }
4373                         }
4374                 }
4375                 spin_lock_irq(mlx4_tlock(dev));
4376         }
4377         spin_unlock_irq(mlx4_tlock(dev));
4378 }
4379
4380 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4381 {
4382         struct mlx4_priv *priv = mlx4_priv(dev);
4383         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4384         struct list_head *srq_list =
4385                 &tracker->slave_list[slave].res_list[RES_SRQ];
4386         struct res_srq *srq;
4387         struct res_srq *tmp;
4388         int state;
4389         u64 in_param;
4391         int srqn;
4392         int err;
4393
4394         err = move_all_busy(dev, slave, RES_SRQ);
4395         if (err)
4396                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4397                           slave);
4398
4399         spin_lock_irq(mlx4_tlock(dev));
4400         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4401                 spin_unlock_irq(mlx4_tlock(dev));
4402                 if (srq->com.owner == slave) {
4403                         srqn = srq->com.res_id;
4404                         state = srq->com.from_state;
4405                         while (state != 0) {
4406                                 switch (state) {
4407                                 case RES_SRQ_ALLOCATED:
4408                                         __mlx4_srq_free_icm(dev, srqn);
4409                                         spin_lock_irq(mlx4_tlock(dev));
4410                                         rb_erase(&srq->com.node,
4411                                                  &tracker->res_tree[RES_SRQ]);
4412                                         list_del(&srq->com.list);
4413                                         spin_unlock_irq(mlx4_tlock(dev));
4414                                         mlx4_release_resource(dev, slave,
4415                                                               RES_SRQ, 1, 0);
4416                                         kfree(srq);
4417                                         state = 0;
4418                                         break;
4419
4420                                 case RES_SRQ_HW:
4421                                         in_param = slave;
4422                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4423                                                        MLX4_CMD_HW2SW_SRQ,
4424                                                        MLX4_CMD_TIME_CLASS_A,
4425                                                        MLX4_CMD_NATIVE);
4426                                         if (err)
4427                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4428                                                          slave, srqn);
4429
4430                                         atomic_dec(&srq->mtt->ref_count);
4431                                         if (srq->cq)
4432                                                 atomic_dec(&srq->cq->ref_count);
4433                                         state = RES_SRQ_ALLOCATED;
4434                                         break;
4435
4436                                 default:
4437                                         state = 0;
4438                                 }
4439                         }
4440                 }
4441                 spin_lock_irq(mlx4_tlock(dev));
4442         }
4443         spin_unlock_irq(mlx4_tlock(dev));
4444 }
4445
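/* Release all CQs still tracked for @slave that no QP/SRQ still
 * references: move HW-owned CQs back to SW (HW2SW_CQ), drop their MTT
 * reference, free the ICM and remove them from the tracker.
 */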
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

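/* Release all MPTs (memory regions/windows) still tracked for @slave:
 * HW-owned MPTs are returned to SW (HW2SW_MPT) and their MTT reference
 * dropped, then the ICM is freed and the MPT key released.
 */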
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

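/* Free all MTT ranges still tracked for @slave. MTTs have no HW
 * ownership state, so each allocated range is simply returned to the
 * allocator and removed from the tracker.
 */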
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

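/* Detach and free all flow-steering rules still tracked for @slave.
 * Each rule is detached in FW (QP_FLOW_STEERING_DETACH, keyed by its
 * registration id) before its tracker entry is removed.
 */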
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_fs_rule: failed to detach rule for slave %d\n",
							 slave);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

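/* Release all EQs still tracked for @slave: move HW-owned EQs back to
 * SW (HW2SW_EQ), drop their MTT reference and remove them from the
 * tracker.
 */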
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0xff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

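/* Free all counters still tracked for @slave and return them to the
 * global pool. Counters have no HW ownership state, so this is a
 * straight erase-and-free under the tracker lock.
 */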
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int index;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

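/* Free all XRC domains still tracked for @slave and release their
 * xrcdn numbers back to the allocator.
 */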
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

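/* Tear down everything a slave (VF) still owns, e.g. after FLR or
 * driver unload. Resources are released in dependency order: steering
 * rules and QPs before the SRQs/CQs they reference, MRs and EQs before
 * the MTTs backing them, and counters/XRC domains last.
 */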
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

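/* Deferred work that pushes an administrator-requested VLAN/QoS change
 * for a VF (e.g. via "ip link set ... vf N vlan V") to all of the VF's
 * live QPs with UPDATE_QP, so the change takes effect without the VF
 * re-creating its QPs. On success, the previously registered VLAN is
 * unregistered.
 */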
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = ((qp->sched_queue >> 6) & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
}