net/mlx4_core: Don't fail reg/unreg vlan for older guests
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define MLX4_MAC_VALID          (1ull << 63)
51
52 struct mac_res {
53         struct list_head list;
54         u64 mac;
55         u8 port;
56 };
57
58 struct vlan_res {
59         struct list_head list;
60         u16 vlan;
61         int ref_count;
62         int vlan_index;
63         u8 port;
64 };
65
66 struct res_common {
67         struct list_head        list;
68         struct rb_node          node;
69         u64                     res_id;
70         int                     owner;
71         int                     state;
72         int                     from_state;
73         int                     to_state;
74         int                     removing;
75 };
76
77 enum {
78         RES_ANY_BUSY = 1
79 };
80
81 struct res_gid {
82         struct list_head        list;
83         u8                      gid[16];
84         enum mlx4_protocol      prot;
85         enum mlx4_steer_type    steer;
86         u64                     reg_id;
87 };
88
89 enum res_qp_states {
90         RES_QP_BUSY = RES_ANY_BUSY,
91
92         /* QP number was allocated */
93         RES_QP_RESERVED,
94
95         /* ICM memory for QP context was mapped */
96         RES_QP_MAPPED,
97
98         /* QP is in hw ownership */
99         RES_QP_HW
100 };
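
/*
 * Rough sketch of the transitions qp_res_start_move_to() below allows,
 * with RES_QP_BUSY as the transient state held while a move is in
 * flight:
 *
 *   RES_QP_RESERVED <---> RES_QP_MAPPED <---> RES_QP_HW
 *           (ICM map/unmap)          (hw ownership)
 */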
101
102 struct res_qp {
103         struct res_common       com;
104         struct res_mtt         *mtt;
105         struct res_cq          *rcq;
106         struct res_cq          *scq;
107         struct res_srq         *srq;
108         struct list_head        mcg_list;
109         spinlock_t              mcg_spl;
110         int                     local_qpn;
111         atomic_t                ref_count;
112         u32                     qpc_flags;
113         u8                      sched_queue;
114 };
115
116 enum res_mtt_states {
117         RES_MTT_BUSY = RES_ANY_BUSY,
118         RES_MTT_ALLOCATED,
119 };
120
121 static inline const char *mtt_states_str(enum res_mtt_states state)
122 {
123         switch (state) {
124         case RES_MTT_BUSY: return "RES_MTT_BUSY";
125         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
126         default: return "Unknown";
127         }
128 }
129
130 struct res_mtt {
131         struct res_common       com;
132         int                     order;
133         atomic_t                ref_count;
134 };
135
136 enum res_mpt_states {
137         RES_MPT_BUSY = RES_ANY_BUSY,
138         RES_MPT_RESERVED,
139         RES_MPT_MAPPED,
140         RES_MPT_HW,
141 };
142
143 struct res_mpt {
144         struct res_common       com;
145         struct res_mtt         *mtt;
146         int                     key;
147 };
148
149 enum res_eq_states {
150         RES_EQ_BUSY = RES_ANY_BUSY,
151         RES_EQ_RESERVED,
152         RES_EQ_HW,
153 };
154
155 struct res_eq {
156         struct res_common       com;
157         struct res_mtt         *mtt;
158 };
159
160 enum res_cq_states {
161         RES_CQ_BUSY = RES_ANY_BUSY,
162         RES_CQ_ALLOCATED,
163         RES_CQ_HW,
164 };
165
166 struct res_cq {
167         struct res_common       com;
168         struct res_mtt         *mtt;
169         atomic_t                ref_count;
170 };
171
172 enum res_srq_states {
173         RES_SRQ_BUSY = RES_ANY_BUSY,
174         RES_SRQ_ALLOCATED,
175         RES_SRQ_HW,
176 };
177
178 struct res_srq {
179         struct res_common       com;
180         struct res_mtt         *mtt;
181         struct res_cq          *cq;
182         atomic_t                ref_count;
183 };
184
185 enum res_counter_states {
186         RES_COUNTER_BUSY = RES_ANY_BUSY,
187         RES_COUNTER_ALLOCATED,
188 };
189
190 struct res_counter {
191         struct res_common       com;
192         int                     port;
193 };
194
195 enum res_xrcdn_states {
196         RES_XRCD_BUSY = RES_ANY_BUSY,
197         RES_XRCD_ALLOCATED,
198 };
199
200 struct res_xrcdn {
201         struct res_common       com;
202         int                     port;
203 };
204
205 enum res_fs_rule_states {
206         RES_FS_RULE_BUSY = RES_ANY_BUSY,
207         RES_FS_RULE_ALLOCATED,
208 };
209
210 struct res_fs_rule {
211         struct res_common       com;
212         int                     qpn;
213 };
214
215 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
216 {
217         struct rb_node *node = root->rb_node;
218
219         while (node) {
220                 struct res_common *res = container_of(node, struct res_common,
221                                                       node);
222
223                 if (res_id < res->res_id)
224                         node = node->rb_left;
225                 else if (res_id > res->res_id)
226                         node = node->rb_right;
227                 else
228                         return res;
229         }
230         return NULL;
231 }
232
233 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
234 {
235         struct rb_node **new = &(root->rb_node), *parent = NULL;
236
237         /* Figure out where to put new node */
238         while (*new) {
239                 struct res_common *this = container_of(*new, struct res_common,
240                                                        node);
241
242                 parent = *new;
243                 if (res->res_id < this->res_id)
244                         new = &((*new)->rb_left);
245                 else if (res->res_id > this->res_id)
246                         new = &((*new)->rb_right);
247                 else
248                         return -EEXIST;
249         }
250
251         /* Add new node and rebalance tree. */
252         rb_link_node(&res->node, parent, new);
253         rb_insert_color(&res->node, root);
254
255         return 0;
256 }
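
/*
 * The trees are keyed by res_common.res_id, one rb-tree per resource
 * type.  Both helpers assume the caller holds mlx4_tlock(dev); a
 * minimal, illustrative pairing (this is how add_res_range() below
 * uses them):
 *
 *	spin_lock_irq(mlx4_tlock(dev));
 *	err = res_tracker_insert(root, &res->com); /* -EEXIST on dup id */
 *	...
 *	res = res_tracker_lookup(root, res_id);    /* NULL if not found */
 *	spin_unlock_irq(mlx4_tlock(dev));
 */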
257
258 enum qp_transition {
259         QP_TRANS_INIT2RTR,
260         QP_TRANS_RTR2RTS,
261         QP_TRANS_RTS2RTS,
262         QP_TRANS_SQERR2RTS,
263         QP_TRANS_SQD2SQD,
264         QP_TRANS_SQD2RTS
265 };
266
267 /* For debug use */
268 static const char *ResourceType(enum mlx4_resource rt)
269 {
270         switch (rt) {
271         case RES_QP: return "RES_QP";
272         case RES_CQ: return "RES_CQ";
273         case RES_SRQ: return "RES_SRQ";
274         case RES_MPT: return "RES_MPT";
275         case RES_MTT: return "RES_MTT";
276         case RES_MAC: return  "RES_MAC";
277         case RES_VLAN: return  "RES_VLAN";
278         case RES_EQ: return "RES_EQ";
279         case RES_COUNTER: return "RES_COUNTER";
280         case RES_FS_RULE: return "RES_FS_RULE";
281         case RES_XRCD: return "RES_XRCD";
282         default: return "Unknown resource type !!!";
283         }
284 }
285
286 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
287 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
288 {
289         struct mlx4_priv *priv = mlx4_priv(dev);
290         int i;
291         int t;
292
293         priv->mfunc.master.res_tracker.slave_list =
294                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
295                         GFP_KERNEL);
296         if (!priv->mfunc.master.res_tracker.slave_list)
297                 return -ENOMEM;
298
299         for (i = 0; i < dev->num_slaves; i++) {
300                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
301                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
302                                        slave_list[i].res_list[t]);
303                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
304         }
305
306         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
307                  dev->num_slaves);
308         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
309                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
310
311         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
312         return 0;
313 }
314
315 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
316                                 enum mlx4_res_tracker_free_type type)
317 {
318         struct mlx4_priv *priv = mlx4_priv(dev);
319         int i;
320
321         if (priv->mfunc.master.res_tracker.slave_list) {
322                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
323                         for (i = 0; i < dev->num_slaves; i++) {
324                                 if (type == RES_TR_FREE_ALL ||
325                                     dev->caps.function != i)
326                                         mlx4_delete_all_resources_for_slave(dev, i);
327                         }
328                         /* free master's vlans */
329                         i = dev->caps.function;
330                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
331                         rem_slave_vlans(dev, i);
332                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
333                 }
334
335                 if (type != RES_TR_FREE_SLAVES_ONLY) {
336                         kfree(priv->mfunc.master.res_tracker.slave_list);
337                         priv->mfunc.master.res_tracker.slave_list = NULL;
338                 }
339         }
340 }
341
342 static void update_pkey_index(struct mlx4_dev *dev, int slave,
343                               struct mlx4_cmd_mailbox *inbox)
344 {
345         u8 sched = *(u8 *)(inbox->buf + 64);
346         u8 orig_index = *(u8 *)(inbox->buf + 35);
347         u8 new_index;
348         struct mlx4_priv *priv = mlx4_priv(dev);
349         int port;
350
351         port = ((sched >> 6) & 1) + 1;
352
353         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
354         *(u8 *)(inbox->buf + 35) = new_index;
355 }
356
357 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
358                        u8 slave)
359 {
360         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
361         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
362         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
363
364         if (MLX4_QP_ST_UD == ts)
365                 qp_ctx->pri_path.mgid_index = 0x80 | slave;
366
367         if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
368                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
369                         qp_ctx->pri_path.mgid_index = slave & 0x7F;
370                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
371                         qp_ctx->alt_path.mgid_index = slave & 0x7F;
372         }
373 }
374
375 static int update_vport_qp_param(struct mlx4_dev *dev,
376                                  struct mlx4_cmd_mailbox *inbox,
377                                  u8 slave, u32 qpn)
378 {
379         struct mlx4_qp_context  *qpc = inbox->buf + 8;
380         struct mlx4_vport_oper_state *vp_oper;
381         struct mlx4_priv *priv;
382         u32 qp_type;
383         int port;
384
385         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
386         priv = mlx4_priv(dev);
387         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
388
389         if (MLX4_VGT != vp_oper->state.default_vlan) {
390                 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
391                 if (MLX4_QP_ST_RC == qp_type ||
392                     (MLX4_QP_ST_UD == qp_type &&
393                      !mlx4_is_qp_reserved(dev, qpn)))
394                         return -EINVAL;
395
396                 /* the reserved QPs (special, proxy, tunnel)
397                  * do not operate over vlans
398                  */
399                 if (mlx4_is_qp_reserved(dev, qpn))
400                         return 0;
401
402                 /* force strip vlan by clear vsd */
403                 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
404
405                 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
406                     dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
407                         qpc->pri_path.vlan_control =
408                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
409                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
410                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
411                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
412                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
413                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
414                 } else if (0 != vp_oper->state.default_vlan) {
415                         qpc->pri_path.vlan_control =
416                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
417                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
418                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
419                 } else { /* priority tagged */
420                         qpc->pri_path.vlan_control =
421                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
422                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
423                 }
424
425                 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
426                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
427                 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
428                 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
429                 qpc->pri_path.sched_queue &= 0xC7;
430                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
431         }
432         if (vp_oper->state.spoofchk) {
433                 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
434                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
435         }
436         return 0;
437 }
438
439 static int mpt_mask(struct mlx4_dev *dev)
440 {
441         return dev->caps.num_mpts - 1;
442 }
443
444 static void *find_res(struct mlx4_dev *dev, u64 res_id,
445                       enum mlx4_resource type)
446 {
447         struct mlx4_priv *priv = mlx4_priv(dev);
448
449         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
450                                   res_id);
451 }
452
453 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
454                    enum mlx4_resource type,
455                    void *res)
456 {
457         struct res_common *r;
458         int err = 0;
459
460         spin_lock_irq(mlx4_tlock(dev));
461         r = find_res(dev, res_id, type);
462         if (!r) {
463                 err = -ENONET;
464                 goto exit;
465         }
466
467         if (r->state == RES_ANY_BUSY) {
468                 err = -EBUSY;
469                 goto exit;
470         }
471
472         if (r->owner != slave) {
473                 err = -EPERM;
474                 goto exit;
475         }
476
477         r->from_state = r->state;
478         r->state = RES_ANY_BUSY;
479
480         if (res)
481                 *((struct res_common **)res) = r;
482
483 exit:
484         spin_unlock_irq(mlx4_tlock(dev));
485         return err;
486 }
487
488 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
489                                     enum mlx4_resource type,
490                                     u64 res_id, int *slave)
491 {
492
493         struct res_common *r;
494         int err = -ENOENT;
495         int id = res_id;
496
497         if (type == RES_QP)
498                 id &= 0x7fffff;
499         spin_lock(mlx4_tlock(dev));
500
501         r = find_res(dev, id, type);
502         if (r) {
503                 *slave = r->owner;
504                 err = 0;
505         }
506         spin_unlock(mlx4_tlock(dev));
507
508         return err;
509 }
510
511 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
512                     enum mlx4_resource type)
513 {
514         struct res_common *r;
515
516         spin_lock_irq(mlx4_tlock(dev));
517         r = find_res(dev, res_id, type);
518         if (r)
519                 r->state = r->from_state;
520         spin_unlock_irq(mlx4_tlock(dev));
521 }
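
/*
 * Illustrative get_res()/put_res() pattern (a sketch, mirroring the
 * command wrappers later in this file): get_res() marks the entry
 * RES_ANY_BUSY so concurrent flows back off, and put_res() restores
 * the saved state.
 *
 *	struct res_mpt *mpt;
 *	int err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	... work on mpt; other flows see it busy ...
 *	put_res(dev, slave, id, RES_MPT);
 */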
522
523 static struct res_common *alloc_qp_tr(int id)
524 {
525         struct res_qp *ret;
526
527         ret = kzalloc(sizeof *ret, GFP_KERNEL);
528         if (!ret)
529                 return NULL;
530
531         ret->com.res_id = id;
532         ret->com.state = RES_QP_RESERVED;
533         ret->local_qpn = id;
534         INIT_LIST_HEAD(&ret->mcg_list);
535         spin_lock_init(&ret->mcg_spl);
536         atomic_set(&ret->ref_count, 0);
537
538         return &ret->com;
539 }
540
541 static struct res_common *alloc_mtt_tr(int id, int order)
542 {
543         struct res_mtt *ret;
544
545         ret = kzalloc(sizeof *ret, GFP_KERNEL);
546         if (!ret)
547                 return NULL;
548
549         ret->com.res_id = id;
550         ret->order = order;
551         ret->com.state = RES_MTT_ALLOCATED;
552         atomic_set(&ret->ref_count, 0);
553
554         return &ret->com;
555 }
556
557 static struct res_common *alloc_mpt_tr(int id, int key)
558 {
559         struct res_mpt *ret;
560
561         ret = kzalloc(sizeof *ret, GFP_KERNEL);
562         if (!ret)
563                 return NULL;
564
565         ret->com.res_id = id;
566         ret->com.state = RES_MPT_RESERVED;
567         ret->key = key;
568
569         return &ret->com;
570 }
571
572 static struct res_common *alloc_eq_tr(int id)
573 {
574         struct res_eq *ret;
575
576         ret = kzalloc(sizeof *ret, GFP_KERNEL);
577         if (!ret)
578                 return NULL;
579
580         ret->com.res_id = id;
581         ret->com.state = RES_EQ_RESERVED;
582
583         return &ret->com;
584 }
585
586 static struct res_common *alloc_cq_tr(int id)
587 {
588         struct res_cq *ret;
589
590         ret = kzalloc(sizeof *ret, GFP_KERNEL);
591         if (!ret)
592                 return NULL;
593
594         ret->com.res_id = id;
595         ret->com.state = RES_CQ_ALLOCATED;
596         atomic_set(&ret->ref_count, 0);
597
598         return &ret->com;
599 }
600
601 static struct res_common *alloc_srq_tr(int id)
602 {
603         struct res_srq *ret;
604
605         ret = kzalloc(sizeof *ret, GFP_KERNEL);
606         if (!ret)
607                 return NULL;
608
609         ret->com.res_id = id;
610         ret->com.state = RES_SRQ_ALLOCATED;
611         atomic_set(&ret->ref_count, 0);
612
613         return &ret->com;
614 }
615
616 static struct res_common *alloc_counter_tr(int id)
617 {
618         struct res_counter *ret;
619
620         ret = kzalloc(sizeof *ret, GFP_KERNEL);
621         if (!ret)
622                 return NULL;
623
624         ret->com.res_id = id;
625         ret->com.state = RES_COUNTER_ALLOCATED;
626
627         return &ret->com;
628 }
629
630 static struct res_common *alloc_xrcdn_tr(int id)
631 {
632         struct res_xrcdn *ret;
633
634         ret = kzalloc(sizeof *ret, GFP_KERNEL);
635         if (!ret)
636                 return NULL;
637
638         ret->com.res_id = id;
639         ret->com.state = RES_XRCD_ALLOCATED;
640
641         return &ret->com;
642 }
643
644 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
645 {
646         struct res_fs_rule *ret;
647
648         ret = kzalloc(sizeof *ret, GFP_KERNEL);
649         if (!ret)
650                 return NULL;
651
652         ret->com.res_id = id;
653         ret->com.state = RES_FS_RULE_ALLOCATED;
654         ret->qpn = qpn;
655         return &ret->com;
656 }
657
658 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
659                                    int extra)
660 {
661         struct res_common *ret;
662
663         switch (type) {
664         case RES_QP:
665                 ret = alloc_qp_tr(id);
666                 break;
667         case RES_MPT:
668                 ret = alloc_mpt_tr(id, extra);
669                 break;
670         case RES_MTT:
671                 ret = alloc_mtt_tr(id, extra);
672                 break;
673         case RES_EQ:
674                 ret = alloc_eq_tr(id);
675                 break;
676         case RES_CQ:
677                 ret = alloc_cq_tr(id);
678                 break;
679         case RES_SRQ:
680                 ret = alloc_srq_tr(id);
681                 break;
682         case RES_MAC:
683                 pr_err("implementation missing\n");
684                 return NULL;
685         case RES_COUNTER:
686                 ret = alloc_counter_tr(id);
687                 break;
688         case RES_XRCD:
689                 ret = alloc_xrcdn_tr(id);
690                 break;
691         case RES_FS_RULE:
692                 ret = alloc_fs_rule_tr(id, extra);
693                 break;
694         default:
695                 return NULL;
696         }
697         if (ret)
698                 ret->owner = slave;
699
700         return ret;
701 }
702
703 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
704                          enum mlx4_resource type, int extra)
705 {
706         int i;
707         int err;
708         struct mlx4_priv *priv = mlx4_priv(dev);
709         struct res_common **res_arr;
710         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
711         struct rb_root *root = &tracker->res_tree[type];
712
713         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
714         if (!res_arr)
715                 return -ENOMEM;
716
717         for (i = 0; i < count; ++i) {
718                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
719                 if (!res_arr[i]) {
720                         for (--i; i >= 0; --i)
721                                 kfree(res_arr[i]);
722
723                         kfree(res_arr);
724                         return -ENOMEM;
725                 }
726         }
727
728         spin_lock_irq(mlx4_tlock(dev));
729         for (i = 0; i < count; ++i) {
730                 if (find_res(dev, base + i, type)) {
731                         err = -EEXIST;
732                         goto undo;
733                 }
734                 err = res_tracker_insert(root, res_arr[i]);
735                 if (err)
736                         goto undo;
737                 list_add_tail(&res_arr[i]->list,
738                               &tracker->slave_list[slave].res_list[type]);
739         }
740         spin_unlock_irq(mlx4_tlock(dev));
741         kfree(res_arr);
742
743         return 0;
744
745 undo:
746                 for (--i; i >= 0; --i)
747                 rb_erase(&res_arr[i]->node, root);
748
749         spin_unlock_irq(mlx4_tlock(dev));
750
751         for (i = 0; i < count; ++i)
752                 kfree(res_arr[i]);
753
754         kfree(res_arr);
755
756         return err;
757 }
758
759 static int remove_qp_ok(struct res_qp *res)
760 {
761         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
762             !list_empty(&res->mcg_list)) {
763                 pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
764                        res->com.state, atomic_read(&res->ref_count));
765                 return -EBUSY;
766         } else if (res->com.state != RES_QP_RESERVED) {
767                 return -EPERM;
768         }
769
770         return 0;
771 }
772
773 static int remove_mtt_ok(struct res_mtt *res, int order)
774 {
775         if (res->com.state == RES_MTT_BUSY ||
776             atomic_read(&res->ref_count)) {
777                 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
778                        __func__, __LINE__,
779                        mtt_states_str(res->com.state),
780                        atomic_read(&res->ref_count));
781                 return -EBUSY;
782         } else if (res->com.state != RES_MTT_ALLOCATED)
783                 return -EPERM;
784         else if (res->order != order)
785                 return -EINVAL;
786
787         return 0;
788 }
789
790 static int remove_mpt_ok(struct res_mpt *res)
791 {
792         if (res->com.state == RES_MPT_BUSY)
793                 return -EBUSY;
794         else if (res->com.state != RES_MPT_RESERVED)
795                 return -EPERM;
796
797         return 0;
798 }
799
800 static int remove_eq_ok(struct res_eq *res)
801 {
802         if (res->com.state == RES_EQ_BUSY)
803                 return -EBUSY;
804         else if (res->com.state != RES_EQ_RESERVED)
805                 return -EPERM;
806
807         return 0;
808 }
809
810 static int remove_counter_ok(struct res_counter *res)
811 {
812         if (res->com.state == RES_COUNTER_BUSY)
813                 return -EBUSY;
814         else if (res->com.state != RES_COUNTER_ALLOCATED)
815                 return -EPERM;
816
817         return 0;
818 }
819
820 static int remove_xrcdn_ok(struct res_xrcdn *res)
821 {
822         if (res->com.state == RES_XRCD_BUSY)
823                 return -EBUSY;
824         else if (res->com.state != RES_XRCD_ALLOCATED)
825                 return -EPERM;
826
827         return 0;
828 }
829
830 static int remove_fs_rule_ok(struct res_fs_rule *res)
831 {
832         if (res->com.state == RES_FS_RULE_BUSY)
833                 return -EBUSY;
834         else if (res->com.state != RES_FS_RULE_ALLOCATED)
835                 return -EPERM;
836
837         return 0;
838 }
839
840 static int remove_cq_ok(struct res_cq *res)
841 {
842         if (res->com.state == RES_CQ_BUSY)
843                 return -EBUSY;
844         else if (res->com.state != RES_CQ_ALLOCATED)
845                 return -EPERM;
846
847         return 0;
848 }
849
850 static int remove_srq_ok(struct res_srq *res)
851 {
852         if (res->com.state == RES_SRQ_BUSY)
853                 return -EBUSY;
854         else if (res->com.state != RES_SRQ_ALLOCATED)
855                 return -EPERM;
856
857         return 0;
858 }
859
860 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
861 {
862         switch (type) {
863         case RES_QP:
864                 return remove_qp_ok((struct res_qp *)res);
865         case RES_CQ:
866                 return remove_cq_ok((struct res_cq *)res);
867         case RES_SRQ:
868                 return remove_srq_ok((struct res_srq *)res);
869         case RES_MPT:
870                 return remove_mpt_ok((struct res_mpt *)res);
871         case RES_MTT:
872                 return remove_mtt_ok((struct res_mtt *)res, extra);
873         case RES_MAC:
874                 return -ENOSYS;
875         case RES_EQ:
876                 return remove_eq_ok((struct res_eq *)res);
877         case RES_COUNTER:
878                 return remove_counter_ok((struct res_counter *)res);
879         case RES_XRCD:
880                 return remove_xrcdn_ok((struct res_xrcdn *)res);
881         case RES_FS_RULE:
882                 return remove_fs_rule_ok((struct res_fs_rule *)res);
883         default:
884                 return -EINVAL;
885         }
886 }
887
888 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
889                          enum mlx4_resource type, int extra)
890 {
891         u64 i;
892         int err;
893         struct mlx4_priv *priv = mlx4_priv(dev);
894         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
895         struct res_common *r;
896
897         spin_lock_irq(mlx4_tlock(dev));
898         for (i = base; i < base + count; ++i) {
899                 r = res_tracker_lookup(&tracker->res_tree[type], i);
900                 if (!r) {
901                         err = -ENOENT;
902                         goto out;
903                 }
904                 if (r->owner != slave) {
905                         err = -EPERM;
906                         goto out;
907                 }
908                 err = remove_ok(r, type, extra);
909                 if (err)
910                         goto out;
911         }
912
913         for (i = base; i < base + count; ++i) {
914                 r = res_tracker_lookup(&tracker->res_tree[type], i);
915                 rb_erase(&r->node, &tracker->res_tree[type]);
916                 list_del(&r->list);
917                 kfree(r);
918         }
919         err = 0;
920
921 out:
922         spin_unlock_irq(mlx4_tlock(dev));
923
924         return err;
925 }
926
927 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
928                                 enum res_qp_states state, struct res_qp **qp,
929                                 int alloc)
930 {
931         struct mlx4_priv *priv = mlx4_priv(dev);
932         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
933         struct res_qp *r;
934         int err = 0;
935
936         spin_lock_irq(mlx4_tlock(dev));
937         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
938         if (!r)
939                 err = -ENOENT;
940         else if (r->com.owner != slave)
941                 err = -EPERM;
942         else {
943                 switch (state) {
944                 case RES_QP_BUSY:
945                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
946                                  __func__, r->com.res_id);
947                         err = -EBUSY;
948                         break;
949
950                 case RES_QP_RESERVED:
951                         if (r->com.state == RES_QP_MAPPED && !alloc)
952                                 break;
953
954                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
955                         err = -EINVAL;
956                         break;
957
958                 case RES_QP_MAPPED:
959                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
960                             r->com.state == RES_QP_HW)
961                                 break;
962                         else {
963                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
964                                           r->com.res_id);
965                                 err = -EINVAL;
966                         }
967
968                         break;
969
970                 case RES_QP_HW:
971                         if (r->com.state != RES_QP_MAPPED)
972                                 err = -EINVAL;
973                         break;
974                 default:
975                         err = -EINVAL;
976                 }
977
978                 if (!err) {
979                         r->com.from_state = r->com.state;
980                         r->com.to_state = state;
981                         r->com.state = RES_QP_BUSY;
982                         if (qp)
983                                 *qp = r;
984                 }
985         }
986
987         spin_unlock_irq(mlx4_tlock(dev));
988
989         return err;
990 }
991
992 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
993                                 enum res_mpt_states state, struct res_mpt **mpt)
994 {
995         struct mlx4_priv *priv = mlx4_priv(dev);
996         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
997         struct res_mpt *r;
998         int err = 0;
999
1000         spin_lock_irq(mlx4_tlock(dev));
1001         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1002         if (!r)
1003                 err = -ENOENT;
1004         else if (r->com.owner != slave)
1005                 err = -EPERM;
1006         else {
1007                 switch (state) {
1008                 case RES_MPT_BUSY:
1009                         err = -EINVAL;
1010                         break;
1011
1012                 case RES_MPT_RESERVED:
1013                         if (r->com.state != RES_MPT_MAPPED)
1014                                 err = -EINVAL;
1015                         break;
1016
1017                 case RES_MPT_MAPPED:
1018                         if (r->com.state != RES_MPT_RESERVED &&
1019                             r->com.state != RES_MPT_HW)
1020                                 err = -EINVAL;
1021                         break;
1022
1023                 case RES_MPT_HW:
1024                         if (r->com.state != RES_MPT_MAPPED)
1025                                 err = -EINVAL;
1026                         break;
1027                 default:
1028                         err = -EINVAL;
1029                 }
1030
1031                 if (!err) {
1032                         r->com.from_state = r->com.state;
1033                         r->com.to_state = state;
1034                         r->com.state = RES_MPT_BUSY;
1035                         if (mpt)
1036                                 *mpt = r;
1037                 }
1038         }
1039
1040         spin_unlock_irq(mlx4_tlock(dev));
1041
1042         return err;
1043 }
1044
1045 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1046                                 enum res_eq_states state, struct res_eq **eq)
1047 {
1048         struct mlx4_priv *priv = mlx4_priv(dev);
1049         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1050         struct res_eq *r;
1051         int err = 0;
1052
1053         spin_lock_irq(mlx4_tlock(dev));
1054         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1055         if (!r)
1056                 err = -ENOENT;
1057         else if (r->com.owner != slave)
1058                 err = -EPERM;
1059         else {
1060                 switch (state) {
1061                 case RES_EQ_BUSY:
1062                         err = -EINVAL;
1063                         break;
1064
1065                 case RES_EQ_RESERVED:
1066                         if (r->com.state != RES_EQ_HW)
1067                                 err = -EINVAL;
1068                         break;
1069
1070                 case RES_EQ_HW:
1071                         if (r->com.state != RES_EQ_RESERVED)
1072                                 err = -EINVAL;
1073                         break;
1074
1075                 default:
1076                         err = -EINVAL;
1077                 }
1078
1079                 if (!err) {
1080                         r->com.from_state = r->com.state;
1081                         r->com.to_state = state;
1082                         r->com.state = RES_EQ_BUSY;
1083                         if (eq)
1084                                 *eq = r;
1085                 }
1086         }
1087
1088         spin_unlock_irq(mlx4_tlock(dev));
1089
1090         return err;
1091 }
1092
1093 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1094                                 enum res_cq_states state, struct res_cq **cq)
1095 {
1096         struct mlx4_priv *priv = mlx4_priv(dev);
1097         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1098         struct res_cq *r;
1099         int err;
1100
1101         spin_lock_irq(mlx4_tlock(dev));
1102         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1103         if (!r)
1104                 err = -ENOENT;
1105         else if (r->com.owner != slave)
1106                 err = -EPERM;
1107         else {
1108                 switch (state) {
1109                 case RES_CQ_BUSY:
1110                         err = -EBUSY;
1111                         break;
1112
1113                 case RES_CQ_ALLOCATED:
1114                         if (r->com.state != RES_CQ_HW)
1115                                 err = -EINVAL;
1116                         else if (atomic_read(&r->ref_count))
1117                                 err = -EBUSY;
1118                         else
1119                                 err = 0;
1120                         break;
1121
1122                 case RES_CQ_HW:
1123                         if (r->com.state != RES_CQ_ALLOCATED)
1124                                 err = -EINVAL;
1125                         else
1126                                 err = 0;
1127                         break;
1128
1129                 default:
1130                         err = -EINVAL;
1131                 }
1132
1133                 if (!err) {
1134                         r->com.from_state = r->com.state;
1135                         r->com.to_state = state;
1136                         r->com.state = RES_CQ_BUSY;
1137                         if (cq)
1138                                 *cq = r;
1139                 }
1140         }
1141
1142         spin_unlock_irq(mlx4_tlock(dev));
1143
1144         return err;
1145 }
1146
1147 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1148                                  enum res_cq_states state, struct res_srq **srq)
1149 {
1150         struct mlx4_priv *priv = mlx4_priv(dev);
1151         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1152         struct res_srq *r;
1153         int err = 0;
1154
1155         spin_lock_irq(mlx4_tlock(dev));
1156         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1157         if (!r)
1158                 err = -ENOENT;
1159         else if (r->com.owner != slave)
1160                 err = -EPERM;
1161         else {
1162                 switch (state) {
1163                 case RES_SRQ_BUSY:
1164                         err = -EINVAL;
1165                         break;
1166
1167                 case RES_SRQ_ALLOCATED:
1168                         if (r->com.state != RES_SRQ_HW)
1169                                 err = -EINVAL;
1170                         else if (atomic_read(&r->ref_count))
1171                                 err = -EBUSY;
1172                         break;
1173
1174                 case RES_SRQ_HW:
1175                         if (r->com.state != RES_SRQ_ALLOCATED)
1176                                 err = -EINVAL;
1177                         break;
1178
1179                 default:
1180                         err = -EINVAL;
1181                 }
1182
1183                 if (!err) {
1184                         r->com.from_state = r->com.state;
1185                         r->com.to_state = state;
1186                         r->com.state = RES_SRQ_BUSY;
1187                         if (srq)
1188                                 *srq = r;
1189                 }
1190         }
1191
1192         spin_unlock_irq(mlx4_tlock(dev));
1193
1194         return err;
1195 }
1196
1197 static void res_abort_move(struct mlx4_dev *dev, int slave,
1198                            enum mlx4_resource type, int id)
1199 {
1200         struct mlx4_priv *priv = mlx4_priv(dev);
1201         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1202         struct res_common *r;
1203
1204         spin_lock_irq(mlx4_tlock(dev));
1205         r = res_tracker_lookup(&tracker->res_tree[type], id);
1206         if (r && (r->owner == slave))
1207                 r->state = r->from_state;
1208         spin_unlock_irq(mlx4_tlock(dev));
1209 }
1210
1211 static void res_end_move(struct mlx4_dev *dev, int slave,
1212                          enum mlx4_resource type, int id)
1213 {
1214         struct mlx4_priv *priv = mlx4_priv(dev);
1215         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1216         struct res_common *r;
1217
1218         spin_lock_irq(mlx4_tlock(dev));
1219         r = res_tracker_lookup(&tracker->res_tree[type], id);
1220         if (r && (r->owner == slave))
1221                 r->state = r->to_state;
1222         spin_unlock_irq(mlx4_tlock(dev));
1223 }
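
/*
 * The *_res_start_move_to() helpers pair with res_end_move() or
 * res_abort_move() to bracket a firmware command; a sketch of the
 * pattern as used by qp_alloc_res() below:
 *
 *	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, NULL, 1);
 *	if (err)
 *		return err;
 *	err = __mlx4_qp_alloc_icm(dev, qpn);
 *	if (err) {
 *		res_abort_move(dev, slave, RES_QP, qpn); /* back to from_state */
 *		return err;
 *	}
 *	res_end_move(dev, slave, RES_QP, qpn);           /* commit to_state */
 */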
1224
1225 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1226 {
1227         return mlx4_is_qp_reserved(dev, qpn) &&
1228                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1229 }
1230
1231 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1232 {
1233         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1234 }
1235
1236 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1237                         u64 in_param, u64 *out_param)
1238 {
1239         int err;
1240         int count;
1241         int align;
1242         int base;
1243         int qpn;
1244
1245         switch (op) {
1246         case RES_OP_RESERVE:
1247                 count = get_param_l(&in_param);
1248                 align = get_param_h(&in_param);
1249                 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1250                 if (err)
1251                         return err;
1252
1253                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1254                 if (err) {
1255                         __mlx4_qp_release_range(dev, base, count);
1256                         return err;
1257                 }
1258                 set_param_l(out_param, base);
1259                 break;
1260         case RES_OP_MAP_ICM:
1261                 qpn = get_param_l(&in_param) & 0x7fffff;
1262                 if (valid_reserved(dev, slave, qpn)) {
1263                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1264                         if (err)
1265                                 return err;
1266                 }
1267
1268                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1269                                            NULL, 1);
1270                 if (err)
1271                         return err;
1272
1273                 if (!fw_reserved(dev, qpn)) {
1274                         err = __mlx4_qp_alloc_icm(dev, qpn);
1275                         if (err) {
1276                                 res_abort_move(dev, slave, RES_QP, qpn);
1277                                 return err;
1278                         }
1279                 }
1280
1281                 res_end_move(dev, slave, RES_QP, qpn);
1282                 break;
1283
1284         default:
1285                 err = -EINVAL;
1286                 break;
1287         }
1288         return err;
1289 }
1290
1291 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1292                          u64 in_param, u64 *out_param)
1293 {
1294         int err = -EINVAL;
1295         int base;
1296         int order;
1297
1298         if (op != RES_OP_RESERVE_AND_MAP)
1299                 return err;
1300
1301         order = get_param_l(&in_param);
1302         base = __mlx4_alloc_mtt_range(dev, order);
1303         if (base == -1)
1304                 return -ENOMEM;
1305
1306         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1307         if (err)
1308                 __mlx4_free_mtt_range(dev, base, order);
1309         else
1310                 set_param_l(out_param, base);
1311
1312         return err;
1313 }
1314
1315 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1316                          u64 in_param, u64 *out_param)
1317 {
1318         int err = -EINVAL;
1319         int index;
1320         int id;
1321         struct res_mpt *mpt;
1322
1323         switch (op) {
1324         case RES_OP_RESERVE:
1325                 index = __mlx4_mpt_reserve(dev);
1326                 if (index == -1)
1327                         break;
1328                 id = index & mpt_mask(dev);
1329
1330                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1331                 if (err) {
1332                         __mlx4_mpt_release(dev, index);
1333                         break;
1334                 }
1335                 set_param_l(out_param, index);
1336                 break;
1337         case RES_OP_MAP_ICM:
1338                 index = get_param_l(&in_param);
1339                 id = index & mpt_mask(dev);
1340                 err = mr_res_start_move_to(dev, slave, id,
1341                                            RES_MPT_MAPPED, &mpt);
1342                 if (err)
1343                         return err;
1344
1345                 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1346                 if (err) {
1347                         res_abort_move(dev, slave, RES_MPT, id);
1348                         return err;
1349                 }
1350
1351                 res_end_move(dev, slave, RES_MPT, id);
1352                 break;
1353         }
1354         return err;
1355 }
1356
1357 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1358                         u64 in_param, u64 *out_param)
1359 {
1360         int cqn;
1361         int err;
1362
1363         switch (op) {
1364         case RES_OP_RESERVE_AND_MAP:
1365                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1366                 if (err)
1367                         break;
1368
1369                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1370                 if (err) {
1371                         __mlx4_cq_free_icm(dev, cqn);
1372                         break;
1373                 }
1374
1375                 set_param_l(out_param, cqn);
1376                 break;
1377
1378         default:
1379                 err = -EINVAL;
1380         }
1381
1382         return err;
1383 }
1384
1385 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1386                          u64 in_param, u64 *out_param)
1387 {
1388         int srqn;
1389         int err;
1390
1391         switch (op) {
1392         case RES_OP_RESERVE_AND_MAP:
1393                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1394                 if (err)
1395                         break;
1396
1397                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1398                 if (err) {
1399                         __mlx4_srq_free_icm(dev, srqn);
1400                         break;
1401                 }
1402
1403                 set_param_l(out_param, srqn);
1404                 break;
1405
1406         default:
1407                 err = -EINVAL;
1408         }
1409
1410         return err;
1411 }
1412
1413 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1414 {
1415         struct mlx4_priv *priv = mlx4_priv(dev);
1416         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1417         struct mac_res *res;
1418
1419         res = kzalloc(sizeof *res, GFP_KERNEL);
1420         if (!res)
1421                 return -ENOMEM;
1422         res->mac = mac;
1423         res->port = (u8) port;
1424         list_add_tail(&res->list,
1425                       &tracker->slave_list[slave].res_list[RES_MAC]);
1426         return 0;
1427 }
1428
1429 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1430                                int port)
1431 {
1432         struct mlx4_priv *priv = mlx4_priv(dev);
1433         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1434         struct list_head *mac_list =
1435                 &tracker->slave_list[slave].res_list[RES_MAC];
1436         struct mac_res *res, *tmp;
1437
1438         list_for_each_entry_safe(res, tmp, mac_list, list) {
1439                 if (res->mac == mac && res->port == (u8) port) {
1440                         list_del(&res->list);
1441                         kfree(res);
1442                         break;
1443                 }
1444         }
1445 }
1446
1447 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1448 {
1449         struct mlx4_priv *priv = mlx4_priv(dev);
1450         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1451         struct list_head *mac_list =
1452                 &tracker->slave_list[slave].res_list[RES_MAC];
1453         struct mac_res *res, *tmp;
1454
1455         list_for_each_entry_safe(res, tmp, mac_list, list) {
1456                 list_del(&res->list);
1457                 __mlx4_unregister_mac(dev, res->port, res->mac);
1458                 kfree(res);
1459         }
1460 }
1461
1462 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1463                          u64 in_param, u64 *out_param, int in_port)
1464 {
1465         int err = -EINVAL;
1466         int port;
1467         u64 mac;
1468
1469         if (op != RES_OP_RESERVE_AND_MAP)
1470                 return err;
1471
1472         port = !in_port ? get_param_l(out_param) : in_port;
1473         mac = in_param;
1474
1475         err = __mlx4_register_mac(dev, port, mac);
1476         if (err >= 0) {
1477                 set_param_l(out_param, err);
1478                 err = 0;
1479         }
1480
1481         if (!err) {
1482                 err = mac_add_to_slave(dev, slave, mac, port);
1483                 if (err)
1484                         __mlx4_unregister_mac(dev, port, mac);
1485         }
1486         return err;
1487 }
1488
1489 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1490                              int port, int vlan_index)
1491 {
1492         struct mlx4_priv *priv = mlx4_priv(dev);
1493         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1494         struct list_head *vlan_list =
1495                 &tracker->slave_list[slave].res_list[RES_VLAN];
1496         struct vlan_res *res, *tmp;
1497
1498         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1499                 if (res->vlan == vlan && res->port == (u8) port) {
1500                         /* vlan found. update ref count */
1501                         ++res->ref_count;
1502                         return 0;
1503                 }
1504         }
1505
1506         res = kzalloc(sizeof(*res), GFP_KERNEL);
1507         if (!res)
1508                 return -ENOMEM;
1509         res->vlan = vlan;
1510         res->port = (u8) port;
1511         res->vlan_index = vlan_index;
1512         res->ref_count = 1;
1513         list_add_tail(&res->list,
1514                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1515         return 0;
1516 }
1517
1518
1519 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1520                                 int port)
1521 {
1522         struct mlx4_priv *priv = mlx4_priv(dev);
1523         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1524         struct list_head *vlan_list =
1525                 &tracker->slave_list[slave].res_list[RES_VLAN];
1526         struct vlan_res *res, *tmp;
1527
1528         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1529                 if (res->vlan == vlan && res->port == (u8) port) {
1530                         if (!--res->ref_count) {
1531                                 list_del(&res->list);
1532                                 kfree(res);
1533                         }
1534                         break;
1535                 }
1536         }
1537 }
1538
1539 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1540 {
1541         struct mlx4_priv *priv = mlx4_priv(dev);
1542         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1543         struct list_head *vlan_list =
1544                 &tracker->slave_list[slave].res_list[RES_VLAN];
1545         struct vlan_res *res, *tmp;
1546         int i;
1547
1548         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1549                 list_del(&res->list);
1550                 /* drop the vlan once for each time the slave registered it */
1551                 for (i = 0; i < res->ref_count; i++)
1552                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1553                 kfree(res);
1554         }
1555 }
1556
1557 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1558                           u64 in_param, u64 *out_param, int in_port)
1559 {
1560         struct mlx4_priv *priv = mlx4_priv(dev);
1561         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1562         int err;
1563         u16 vlan;
1564         int vlan_index;
1565         int port;
1566
1567         port = !in_port ? get_param_l(out_param) : in_port;
1568
1569         if (!port || op != RES_OP_RESERVE_AND_MAP)
1570                 return -EINVAL;
1571
1572         /* older guests expect reg/unreg vlan to be a NOP; keep that behavior */
1573         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1574                 slave_state[slave].old_vlan_api = true;
1575                 return 0;
1576         }
1577
1578         vlan = (u16) in_param;
1579
1580         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1581         if (!err) {
1582                 set_param_l(out_param, (u32) vlan_index);
1583                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1584                 if (err)
1585                         __mlx4_unregister_vlan(dev, port, vlan);
1586         }
1587         return err;
1588 }
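
/*
 * When no port arrives in the in_modifier (in_port == 0), the slave is
 * running an older driver for which vlan reg/unreg was a NOP; marking
 * old_vlan_api lets the matching free path stay a NOP as well instead
 * of failing the guest.
 */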
1589
1590 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1591                              u64 in_param, u64 *out_param)
1592 {
1593         u32 index;
1594         int err;
1595
1596         if (op != RES_OP_RESERVE)
1597                 return -EINVAL;
1598
1599         err = __mlx4_counter_alloc(dev, &index);
1600         if (err)
1601                 return err;
1602
1603         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1604         if (err)
1605                 __mlx4_counter_free(dev, index);
1606         else
1607                 set_param_l(out_param, index);
1608
1609         return err;
1610 }
1611
1612 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1613                            u64 in_param, u64 *out_param)
1614 {
1615         u32 xrcdn;
1616         int err;
1617
1618         if (op != RES_OP_RESERVE)
1619                 return -EINVAL;
1620
1621         err = __mlx4_xrcd_alloc(dev, &xrcdn);
1622         if (err)
1623                 return err;
1624
1625         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1626         if (err)
1627                 __mlx4_xrcd_free(dev, xrcdn);
1628         else
1629                 set_param_l(out_param, xrcdn);
1630
1631         return err;
1632 }
1633
1634 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1635                            struct mlx4_vhcr *vhcr,
1636                            struct mlx4_cmd_mailbox *inbox,
1637                            struct mlx4_cmd_mailbox *outbox,
1638                            struct mlx4_cmd_info *cmd)
1639 {
1640         int err;
1641         int alop = vhcr->op_modifier;
1642
1643         switch (vhcr->in_modifier & 0xFF) {
1644         case RES_QP:
1645                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1646                                    vhcr->in_param, &vhcr->out_param);
1647                 break;
1648
1649         case RES_MTT:
1650                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1651                                     vhcr->in_param, &vhcr->out_param);
1652                 break;
1653
1654         case RES_MPT:
1655                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1656                                     vhcr->in_param, &vhcr->out_param);
1657                 break;
1658
1659         case RES_CQ:
1660                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1661                                    vhcr->in_param, &vhcr->out_param);
1662                 break;
1663
1664         case RES_SRQ:
1665                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1666                                     vhcr->in_param, &vhcr->out_param);
1667                 break;
1668
1669         case RES_MAC:
1670                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1671                                     vhcr->in_param, &vhcr->out_param,
1672                                     (vhcr->in_modifier >> 8) & 0xFF);
1673                 break;
1674
1675         case RES_VLAN:
1676                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1677                                      vhcr->in_param, &vhcr->out_param,
1678                                      (vhcr->in_modifier >> 8) & 0xFF);
1679                 break;
1680
1681         case RES_COUNTER:
1682                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1683                                         vhcr->in_param, &vhcr->out_param);
1684                 break;
1685
1686         case RES_XRCD:
1687                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1688                                       vhcr->in_param, &vhcr->out_param);
1689                 break;
1690
1691         default:
1692                 err = -EINVAL;
1693                 break;
1694         }
1695
1696         return err;
1697 }
1698
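     /*
      * QP freeing mirrors the two-stage allocation: RES_OP_RESERVE gives
      * back a range of QP numbers (base in the low dword of in_param,
      * count in the high dword), while RES_OP_MAP_ICM unmaps the ICM
      * backing of one QP and moves it back to RES_QP_RESERVED.
      */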
1699 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1700                        u64 in_param)
1701 {
1702         int err;
1703         int count;
1704         int base;
1705         int qpn;
1706
1707         switch (op) {
1708         case RES_OP_RESERVE:
1709                 base = get_param_l(&in_param) & 0x7fffff;
1710                 count = get_param_h(&in_param);
1711                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1712                 if (err)
1713                         break;
1714                 __mlx4_qp_release_range(dev, base, count);
1715                 break;
1716         case RES_OP_MAP_ICM:
1717                 qpn = get_param_l(&in_param) & 0x7fffff;
1718                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1719                                            NULL, 0);
1720                 if (err)
1721                         return err;
1722
1723                 if (!fw_reserved(dev, qpn))
1724                         __mlx4_qp_free_icm(dev, qpn);
1725
1726                 res_end_move(dev, slave, RES_QP, qpn);
1727
1728                 if (valid_reserved(dev, slave, qpn))
1729                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1730                 break;
1731         default:
1732                 err = -EINVAL;
1733                 break;
1734         }
1735         return err;
1736 }
1737
1738 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1739                         u64 in_param, u64 *out_param)
1740 {
1741         int err = -EINVAL;
1742         int base;
1743         int order;
1744
1745         if (op != RES_OP_RESERVE_AND_MAP)
1746                 return err;
1747
1748         base = get_param_l(&in_param);
1749         order = get_param_h(&in_param);
1750         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1751         if (!err)
1752                 __mlx4_free_mtt_range(dev, base, order);
1753         return err;
1754 }
1755
1756 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1757                         u64 in_param)
1758 {
1759         int err = -EINVAL;
1760         int index;
1761         int id;
1762         struct res_mpt *mpt;
1763
1764         switch (op) {
1765         case RES_OP_RESERVE:
1766                 index = get_param_l(&in_param);
1767                 id = index & mpt_mask(dev);
1768                 err = get_res(dev, slave, id, RES_MPT, &mpt);
1769                 if (err)
1770                         break;
1771                 index = mpt->key;
1772                 put_res(dev, slave, id, RES_MPT);
1773
1774                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1775                 if (err)
1776                         break;
1777                 __mlx4_mpt_release(dev, index);
1778                 break;
1779         case RES_OP_MAP_ICM:
1780                 index = get_param_l(&in_param);
1781                 id = index & mpt_mask(dev);
1782                 err = mr_res_start_move_to(dev, slave, id,
1783                                            RES_MPT_RESERVED, &mpt);
1784                 if (err)
1785                         return err;
1786 
1787                 __mlx4_mpt_free_icm(dev, mpt->key);
1788                 res_end_move(dev, slave, RES_MPT, id);
1789                 return err;
1791         default:
1792                 err = -EINVAL;
1793                 break;
1794         }
1795         return err;
1796 }
1797
1798 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1799                        u64 in_param, u64 *out_param)
1800 {
1801         int cqn;
1802         int err;
1803
1804         switch (op) {
1805         case RES_OP_RESERVE_AND_MAP:
1806                 cqn = get_param_l(&in_param);
1807                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1808                 if (err)
1809                         break;
1810
1811                 __mlx4_cq_free_icm(dev, cqn);
1812                 break;
1813
1814         default:
1815                 err = -EINVAL;
1816                 break;
1817         }
1818
1819         return err;
1820 }
1821
1822 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1823                         u64 in_param, u64 *out_param)
1824 {
1825         int srqn;
1826         int err;
1827
1828         switch (op) {
1829         case RES_OP_RESERVE_AND_MAP:
1830                 srqn = get_param_l(&in_param);
1831                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1832                 if (err)
1833                         break;
1834
1835                 __mlx4_srq_free_icm(dev, srqn);
1836                 break;
1837
1838         default:
1839                 err = -EINVAL;
1840                 break;
1841         }
1842
1843         return err;
1844 }
1845
1846 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1847                             u64 in_param, u64 *out_param, int in_port)
1848 {
1849         int port;
1850         int err = 0;
1851
1852         switch (op) {
1853         case RES_OP_RESERVE_AND_MAP:
1854                 port = !in_port ? get_param_l(out_param) : in_port;
1855                 mac_del_from_slave(dev, slave, in_param, port);
1856                 __mlx4_unregister_mac(dev, port, in_param);
1857                 break;
1858         default:
1859                 err = -EINVAL;
1860                 break;
1861         }
1862
1863         return err;
1865 }
1866
1867 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1868                             u64 in_param, u64 *out_param, int port)
1869 {
1870         struct mlx4_priv *priv = mlx4_priv(dev);
1871         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1872         int err = 0;
1873
1874         switch (op) {
1875         case RES_OP_RESERVE_AND_MAP:
1876                 if (slave_state[slave].old_vlan_api)
1877                         return 0;
1878                 if (!port)
1879                         return -EINVAL;
1880                 vlan_del_from_slave(dev, slave, in_param, port);
1881                 __mlx4_unregister_vlan(dev, port, in_param);
1882                 break;
1883         default:
1884                 err = -EINVAL;
1885                 break;
1886         }
1887
1888         return err;
1889 }
1890
1891 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1892                             u64 in_param, u64 *out_param)
1893 {
1894         int index;
1895         int err;
1896
1897         if (op != RES_OP_RESERVE)
1898                 return -EINVAL;
1899
1900         index = get_param_l(&in_param);
1901         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1902         if (err)
1903                 return err;
1904
1905         __mlx4_counter_free(dev, index);
1906
1907         return err;
1908 }
1909
1910 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1911                           u64 in_param, u64 *out_param)
1912 {
1913         int xrcdn;
1914         int err;
1915
1916         if (op != RES_OP_RESERVE)
1917                 return -EINVAL;
1918
1919         xrcdn = get_param_l(&in_param);
1920         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1921         if (err)
1922                 return err;
1923
1924         __mlx4_xrcd_free(dev, xrcdn);
1925
1926         return err;
1927 }
1928
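     /* FREE_RES dispatch; same in_modifier encoding as mlx4_ALLOC_RES_wrapper. */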
1929 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1930                           struct mlx4_vhcr *vhcr,
1931                           struct mlx4_cmd_mailbox *inbox,
1932                           struct mlx4_cmd_mailbox *outbox,
1933                           struct mlx4_cmd_info *cmd)
1934 {
1935         int err = -EINVAL;
1936         int alop = vhcr->op_modifier;
1937
1938         switch (vhcr->in_modifier & 0xFF) {
1939         case RES_QP:
1940                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1941                                   vhcr->in_param);
1942                 break;
1943
1944         case RES_MTT:
1945                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1946                                    vhcr->in_param, &vhcr->out_param);
1947                 break;
1948
1949         case RES_MPT:
1950                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1951                                    vhcr->in_param);
1952                 break;
1953
1954         case RES_CQ:
1955                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1956                                   vhcr->in_param, &vhcr->out_param);
1957                 break;
1958
1959         case RES_SRQ:
1960                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1961                                    vhcr->in_param, &vhcr->out_param);
1962                 break;
1963
1964         case RES_MAC:
1965                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1966                                    vhcr->in_param, &vhcr->out_param,
1967                                    (vhcr->in_modifier >> 8) & 0xFF);
1968                 break;
1969
1970         case RES_VLAN:
1971                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1972                                     vhcr->in_param, &vhcr->out_param,
1973                                     (vhcr->in_modifier >> 8) & 0xFF);
1974                 break;
1975
1976         case RES_COUNTER:
1977                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1978                                        vhcr->in_param, &vhcr->out_param);
1979                 break;
1980
1981         case RES_XRCD:
1982                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1983                                      vhcr->in_param, &vhcr->out_param);
                     break;
1984
1985         default:
1986                 break;
1987         }
1988         return err;
1989 }
1990
1991 /* ugly but other choices are uglier */
1992 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1993 {
1994         return (be32_to_cpu(mpt->flags) >> 9) & 1;
1995 }
1996
1997 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1998 {
1999         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2000 }
2001
2002 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2003 {
2004         return be32_to_cpu(mpt->mtt_sz);
2005 }
2006
2007 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2008 {
2009         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2010 }
2011
2012 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2013 {
2014         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2015 }
2016
2017 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2018 {
2019         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2020 }
2021
2022 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2023 {
2024         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2025 }
2026
2027 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2028 {
2029         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2030 }
2031
2032 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2033 {
2034         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2035 }
2036
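     /*
      * Number of MTT entries implied by a QP context.  A work queue takes
      * 2^(log_size + log_stride + 4) bytes; the RQ contributes nothing if
      * the QP uses an SRQ, RSS or XRC.  The total (plus the 64-byte-unit
      * page_offset) is converted to pages and rounded up to a power of
      * two, since MTT ranges are handed out in power-of-two chunks.
      */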
2037 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2038 {
2039         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2040         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2041         int log_sq_stride = qpc->sq_size_stride & 7;
2042         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2043         int log_rq_stride = qpc->rq_size_stride & 7;
2044         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2045         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2046         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2047         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2048         int sq_size;
2049         int rq_size;
2050         int total_pages;
2051         int total_mem;
2052         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2053
2054         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2055         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2056         total_mem = sq_size + rq_size;
2057         total_pages =
2058                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2059                                    page_shift);
2060
2061         return total_pages;
2062 }
2063
2064 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2065                            int size, struct res_mtt *mtt)
2066 {
2067         int res_start = mtt->com.res_id;
2068         int res_size = (1 << mtt->order);
2069
2070         if (start < res_start || start + size > res_start + res_size)
2071                 return -EPERM;
2072         return 0;
2073 }
2074
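     /*
      * SW2HW_MPT for a slave.  Before forwarding to firmware, refuse
      * memory windows, bind-enabled FMRs and PDs belonging to another
      * function with -EPERM, and for non-physical MRs verify that the
      * referenced MTT range is owned by this slave.
      */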
2075 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2076                            struct mlx4_vhcr *vhcr,
2077                            struct mlx4_cmd_mailbox *inbox,
2078                            struct mlx4_cmd_mailbox *outbox,
2079                            struct mlx4_cmd_info *cmd)
2080 {
2081         int err;
2082         int index = vhcr->in_modifier;
2083         struct res_mtt *mtt;
2084         struct res_mpt *mpt;
2085         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2086         int phys;
2087         int id;
2088         u32 pd;
2089         int pd_slave;
2090
2091         id = index & mpt_mask(dev);
2092         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2093         if (err)
2094                 return err;
2095
2096         /* Disable memory windows for VFs. */
2097         if (!mr_is_region(inbox->buf)) {
2098                 err = -EPERM;
2099                 goto ex_abort;
2100         }
2101
2102         /* Make sure that the PD bits related to the slave id are zeros. */
2103         pd = mr_get_pd(inbox->buf);
2104         pd_slave = (pd >> 17) & 0x7f;
2105         if (pd_slave != 0 && pd_slave != slave) {
2106                 err = -EPERM;
2107                 goto ex_abort;
2108         }
2109
2110         if (mr_is_fmr(inbox->buf)) {
2111                 /* FMR and Bind Enable are forbidden in slave devices. */
2112                 if (mr_is_bind_enabled(inbox->buf)) {
2113                         err = -EPERM;
2114                         goto ex_abort;
2115                 }
2116                 /* FMR and Memory Windows are also forbidden. */
2117                 if (!mr_is_region(inbox->buf)) {
2118                         err = -EPERM;
2119                         goto ex_abort;
2120                 }
2121         }
2122
2123         phys = mr_phys_mpt(inbox->buf);
2124         if (!phys) {
2125                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2126                 if (err)
2127                         goto ex_abort;
2128
2129                 err = check_mtt_range(dev, slave, mtt_base,
2130                                       mr_get_mtt_size(inbox->buf), mtt);
2131                 if (err)
2132                         goto ex_put;
2133
2134                 mpt->mtt = mtt;
2135         }
2136
2137         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2138         if (err)
2139                 goto ex_put;
2140
2141         if (!phys) {
2142                 atomic_inc(&mtt->ref_count);
2143                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2144         }
2145
2146         res_end_move(dev, slave, RES_MPT, id);
2147         return 0;
2148
2149 ex_put:
2150         if (!phys)
2151                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2152 ex_abort:
2153         res_abort_move(dev, slave, RES_MPT, id);
2154
2155         return err;
2156 }
2157
2158 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2159                            struct mlx4_vhcr *vhcr,
2160                            struct mlx4_cmd_mailbox *inbox,
2161                            struct mlx4_cmd_mailbox *outbox,
2162                            struct mlx4_cmd_info *cmd)
2163 {
2164         int err;
2165         int index = vhcr->in_modifier;
2166         struct res_mpt *mpt;
2167         int id;
2168
2169         id = index & mpt_mask(dev);
2170         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2171         if (err)
2172                 return err;
2173
2174         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2175         if (err)
2176                 goto ex_abort;
2177
2178         if (mpt->mtt)
2179                 atomic_dec(&mpt->mtt->ref_count);
2180
2181         res_end_move(dev, slave, RES_MPT, id);
2182         return 0;
2183
2184 ex_abort:
2185         res_abort_move(dev, slave, RES_MPT, id);
2186
2187         return err;
2188 }
2189
2190 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2191                            struct mlx4_vhcr *vhcr,
2192                            struct mlx4_cmd_mailbox *inbox,
2193                            struct mlx4_cmd_mailbox *outbox,
2194                            struct mlx4_cmd_info *cmd)
2195 {
2196         int err;
2197         int index = vhcr->in_modifier;
2198         struct res_mpt *mpt;
2199         int id;
2200
2201         id = index & mpt_mask(dev);
2202         err = get_res(dev, slave, id, RES_MPT, &mpt);
2203         if (err)
2204                 return err;
2205
2206         if (mpt->com.from_state != RES_MPT_HW) {
2207                 err = -EBUSY;
2208                 goto out;
2209         }
2210
2211         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2212
2213 out:
2214         put_res(dev, slave, id, RES_MPT);
2215         return err;
2216 }
2217
2218 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2219 {
2220         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2221 }
2222
2223 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2224 {
2225         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2226 }
2227
2228 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2229 {
2230         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2231 }
2232
2233 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2234                                   struct mlx4_qp_context *context)
2235 {
2236         u32 qpn = vhcr->in_modifier & 0xffffff;
2237         u32 qkey = 0;
2238
2239         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2240                 return;
2241
2242         /* adjust qkey in qp context */
2243         context->qkey = cpu_to_be32(qkey);
2244 }
2245
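     /*
      * RST2INIT pins everything the QP context points at: the MTT range,
      * the receive and send CQs and (if used) the SRQ are looked up
      * first, and their reference counts are bumped only after the
      * firmware command succeeds, so none of them can be destroyed under
      * a live QP.
      */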
2246 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2247                              struct mlx4_vhcr *vhcr,
2248                              struct mlx4_cmd_mailbox *inbox,
2249                              struct mlx4_cmd_mailbox *outbox,
2250                              struct mlx4_cmd_info *cmd)
2251 {
2252         int err;
2253         int qpn = vhcr->in_modifier & 0x7fffff;
2254         struct res_mtt *mtt;
2255         struct res_qp *qp;
2256         struct mlx4_qp_context *qpc = inbox->buf + 8;
2257         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2258         int mtt_size = qp_get_mtt_size(qpc);
2259         struct res_cq *rcq;
2260         struct res_cq *scq;
2261         int rcqn = qp_get_rcqn(qpc);
2262         int scqn = qp_get_scqn(qpc);
2263         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2264         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2265         struct res_srq *srq;
2266         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2267
2268         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2269         if (err)
2270                 return err;
2271         qp->local_qpn = local_qpn;
2272         qp->sched_queue = 0;
2273         qp->qpc_flags = be32_to_cpu(qpc->flags);
2274
2275         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2276         if (err)
2277                 goto ex_abort;
2278
2279         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2280         if (err)
2281                 goto ex_put_mtt;
2282
2283         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2284         if (err)
2285                 goto ex_put_mtt;
2286
2287         if (scqn != rcqn) {
2288                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2289                 if (err)
2290                         goto ex_put_rcq;
2291         } else
2292                 scq = rcq;
2293
2294         if (use_srq) {
2295                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2296                 if (err)
2297                         goto ex_put_scq;
2298         }
2299
2300         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2301         update_pkey_index(dev, slave, inbox);
2302         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2303         if (err)
2304                 goto ex_put_srq;
2305         atomic_inc(&mtt->ref_count);
2306         qp->mtt = mtt;
2307         atomic_inc(&rcq->ref_count);
2308         qp->rcq = rcq;
2309         atomic_inc(&scq->ref_count);
2310         qp->scq = scq;
2311
2312         if (scqn != rcqn)
2313                 put_res(dev, slave, scqn, RES_CQ);
2314
2315         if (use_srq) {
2316                 atomic_inc(&srq->ref_count);
2317                 put_res(dev, slave, srqn, RES_SRQ);
2318                 qp->srq = srq;
2319         }
2320         put_res(dev, slave, rcqn, RES_CQ);
2321         put_res(dev, slave, mtt_base, RES_MTT);
2322         res_end_move(dev, slave, RES_QP, qpn);
2323
2324         return 0;
2325
2326 ex_put_srq:
2327         if (use_srq)
2328                 put_res(dev, slave, srqn, RES_SRQ);
2329 ex_put_scq:
2330         if (scqn != rcqn)
2331                 put_res(dev, slave, scqn, RES_CQ);
2332 ex_put_rcq:
2333         put_res(dev, slave, rcqn, RES_CQ);
2334 ex_put_mtt:
2335         put_res(dev, slave, mtt_base, RES_MTT);
2336 ex_abort:
2337         res_abort_move(dev, slave, RES_QP, qpn);
2338
2339         return err;
2340 }
2341
2342 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2343 {
2344         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2345 }
2346
2347 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2348 {
2349         int log_eq_size = eqc->log_eq_size & 0x1f;
2350         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2351
2352         if (log_eq_size + 5 < page_shift)
2353                 return 1;
2354
2355         return 1 << (log_eq_size + 5 - page_shift);
2356 }
2357
2358 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2359 {
2360         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2361 }
2362
2363 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2364 {
2365         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2366         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2367
2368         if (log_cq_size + 5 < page_shift)
2369                 return 1;
2370
2371         return 1 << (log_cq_size + 5 - page_shift);
2372 }
2373
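     /*
      * EQ numbers are per-slave, so tracked EQ resource ids are the pair
      * (slave << 8 | eqn).  The tracker entry is added here on SW2HW and
      * removed again in the HW2SW wrapper.
      */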
2374 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2375                           struct mlx4_vhcr *vhcr,
2376                           struct mlx4_cmd_mailbox *inbox,
2377                           struct mlx4_cmd_mailbox *outbox,
2378                           struct mlx4_cmd_info *cmd)
2379 {
2380         int err;
2381         int eqn = vhcr->in_modifier;
2382         int res_id = (slave << 8) | eqn;
2383         struct mlx4_eq_context *eqc = inbox->buf;
2384         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2385         int mtt_size = eq_get_mtt_size(eqc);
2386         struct res_eq *eq;
2387         struct res_mtt *mtt;
2388
2389         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2390         if (err)
2391                 return err;
2392         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2393         if (err)
2394                 goto out_add;
2395
2396         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2397         if (err)
2398                 goto out_move;
2399
2400         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2401         if (err)
2402                 goto out_put;
2403
2404         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2405         if (err)
2406                 goto out_put;
2407
2408         atomic_inc(&mtt->ref_count);
2409         eq->mtt = mtt;
2410         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2411         res_end_move(dev, slave, RES_EQ, res_id);
2412         return 0;
2413
2414 out_put:
2415         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2416 out_move:
2417         res_abort_move(dev, slave, RES_EQ, res_id);
2418 out_add:
2419         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2420         return err;
2421 }
2422
2423 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2424                               int len, struct res_mtt **res)
2425 {
2426         struct mlx4_priv *priv = mlx4_priv(dev);
2427         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2428         struct res_mtt *mtt;
2429         int err = -EINVAL;
2430
2431         spin_lock_irq(mlx4_tlock(dev));
2432         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2433                             com.list) {
2434                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2435                         *res = mtt;
2436                         mtt->com.from_state = mtt->com.state;
2437                         mtt->com.state = RES_MTT_BUSY;
2438                         err = 0;
2439                         break;
2440                 }
2441         }
2442         spin_unlock_irq(mlx4_tlock(dev));
2443
2444         return err;
2445 }
2446
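     /*
      * Sanity-check the QP context supplied by a slave for a state
      * transition: slaves own only gid index 0, so a nonzero mgid_index
      * in the primary or alternate path is rejected.
      */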
2447 static int verify_qp_parameters(struct mlx4_dev *dev,
2448                                 struct mlx4_cmd_mailbox *inbox,
2449                                 enum qp_transition transition, u8 slave)
2450 {
2451         u32                     qp_type;
2452         struct mlx4_qp_context  *qp_ctx;
2453         enum mlx4_qp_optpar     optpar;
2454
2455         qp_ctx  = inbox->buf + 8;
2456         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2457         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2458
2459         switch (qp_type) {
2460         case MLX4_QP_ST_RC:
2461         case MLX4_QP_ST_UC:
2462                 switch (transition) {
2463                 case QP_TRANS_INIT2RTR:
2464                 case QP_TRANS_RTR2RTS:
2465                 case QP_TRANS_RTS2RTS:
2466                 case QP_TRANS_SQD2SQD:
2467                 case QP_TRANS_SQD2RTS:
2468                         if (slave != mlx4_master_func_num(dev)) {
2469                                 /* slaves have only gid index 0 */
2470                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2471                                         if (qp_ctx->pri_path.mgid_index)
2472                                                 return -EINVAL;
2473                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2474                                         if (qp_ctx->alt_path.mgid_index)
2475                                                 return -EINVAL;
                             }
2476                         break;
2477                 default:
2478                         break;
2479                 }
2480
2481                 break;
2482         default:
2483                 break;
2484         }
2485
2486         return 0;
2487 }
2488
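     /*
      * WRITE_MTT for a slave is resolved in software: find the tracked
      * MTT range containing the target window, convert the page list to
      * host endianness in place (masking off the low flag bit), and call
      * __mlx4_write_mtt() directly rather than forwarding the mailbox.
      */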
2489 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2490                            struct mlx4_vhcr *vhcr,
2491                            struct mlx4_cmd_mailbox *inbox,
2492                            struct mlx4_cmd_mailbox *outbox,
2493                            struct mlx4_cmd_info *cmd)
2494 {
2495         struct mlx4_mtt mtt;
2496         __be64 *page_list = inbox->buf;
2497         u64 *pg_list = (u64 *)page_list;
2498         int i;
2499         struct res_mtt *rmtt = NULL;
2500         int start = be64_to_cpu(page_list[0]);
2501         int npages = vhcr->in_modifier;
2502         int err;
2503
2504         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2505         if (err)
2506                 return err;
2507
2508         /* Call the SW implementation of write_mtt:
2509          * - Prepare a dummy mtt struct
2510          * - Translate inbox contents to simple addresses in host endianness */
2511         mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2512                             we don't really use it */
2513         mtt.order = 0;
2514         mtt.page_shift = 0;
2515         for (i = 0; i < npages; ++i)
2516                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2517
2518         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2519                                ((u64 *)page_list + 2));
2520
2521         if (rmtt)
2522                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2523
2524         return err;
2525 }
2526
2527 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2528                           struct mlx4_vhcr *vhcr,
2529                           struct mlx4_cmd_mailbox *inbox,
2530                           struct mlx4_cmd_mailbox *outbox,
2531                           struct mlx4_cmd_info *cmd)
2532 {
2533         int eqn = vhcr->in_modifier;
2534         int res_id = eqn | (slave << 8);
2535         struct res_eq *eq;
2536         int err;
2537
2538         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2539         if (err)
2540                 return err;
2541
2542         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2543         if (err)
2544                 goto ex_abort;
2545
2546         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2547         if (err)
2548                 goto ex_put;
2549
2550         atomic_dec(&eq->mtt->ref_count);
2551         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2552         res_end_move(dev, slave, RES_EQ, res_id);
2553         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2554
2555         return 0;
2556
2557 ex_put:
2558         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2559 ex_abort:
2560         res_abort_move(dev, slave, RES_EQ, res_id);
2561
2562         return err;
2563 }
2564
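     /*
      * Inject an EQE into a slave's event queue.  The slave must have
      * registered an EQ for this event type and the EQ must be in the
      * RES_EQ_HW state; 28 bytes of EQE are then posted via the GEN_EQE
      * firmware command with the slave and EQN encoded in the
      * in_modifier.
      */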
2565 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2566 {
2567         struct mlx4_priv *priv = mlx4_priv(dev);
2568         struct mlx4_slave_event_eq_info *event_eq;
2569         struct mlx4_cmd_mailbox *mailbox;
2570         u32 in_modifier = 0;
2571         int err;
2572         int res_id;
2573         struct res_eq *req;
2574
2575         if (!priv->mfunc.master.slave_state)
2576                 return -EINVAL;
2577
2578         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2579
2580         /* Create the event only if the slave is registered */
2581         if (event_eq->eqn < 0)
2582                 return 0;
2583
2584         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2585         res_id = (slave << 8) | event_eq->eqn;
2586         err = get_res(dev, slave, res_id, RES_EQ, &req);
2587         if (err)
2588                 goto unlock;
2589
2590         if (req->com.from_state != RES_EQ_HW) {
2591                 err = -EINVAL;
2592                 goto put;
2593         }
2594
2595         mailbox = mlx4_alloc_cmd_mailbox(dev);
2596         if (IS_ERR(mailbox)) {
2597                 err = PTR_ERR(mailbox);
2598                 goto put;
2599         }
2600
2601         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2602                 ++event_eq->token;
2603                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2604         }
2605
2606         memcpy(mailbox->buf, (u8 *) eqe, 28);
2607
2608         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2609
2610         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2611                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2612                        MLX4_CMD_NATIVE);
2613
2614         put_res(dev, slave, res_id, RES_EQ);
2615         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2616         mlx4_free_cmd_mailbox(dev, mailbox);
2617         return err;
2618
2619 put:
2620         put_res(dev, slave, res_id, RES_EQ);
2621
2622 unlock:
2623         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2624         return err;
2625 }
2626
2627 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2628                           struct mlx4_vhcr *vhcr,
2629                           struct mlx4_cmd_mailbox *inbox,
2630                           struct mlx4_cmd_mailbox *outbox,
2631                           struct mlx4_cmd_info *cmd)
2632 {
2633         int eqn = vhcr->in_modifier;
2634         int res_id = eqn | (slave << 8);
2635         struct res_eq *eq;
2636         int err;
2637
2638         err = get_res(dev, slave, res_id, RES_EQ, &eq);
2639         if (err)
2640                 return err;
2641
2642         if (eq->com.from_state != RES_EQ_HW) {
2643                 err = -EINVAL;
2644                 goto ex_put;
2645         }
2646
2647         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2648
2649 ex_put:
2650         put_res(dev, slave, res_id, RES_EQ);
2651         return err;
2652 }
2653
2654 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2655                           struct mlx4_vhcr *vhcr,
2656                           struct mlx4_cmd_mailbox *inbox,
2657                           struct mlx4_cmd_mailbox *outbox,
2658                           struct mlx4_cmd_info *cmd)
2659 {
2660         int err;
2661         int cqn = vhcr->in_modifier;
2662         struct mlx4_cq_context *cqc = inbox->buf;
2663         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2664         struct res_cq *cq;
2665         struct res_mtt *mtt;
2666
2667         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2668         if (err)
2669                 return err;
2670         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2671         if (err)
2672                 goto out_move;
2673         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2674         if (err)
2675                 goto out_put;
2676         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2677         if (err)
2678                 goto out_put;
2679         atomic_inc(&mtt->ref_count);
2680         cq->mtt = mtt;
2681         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2682         res_end_move(dev, slave, RES_CQ, cqn);
2683         return 0;
2684
2685 out_put:
2686         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2687 out_move:
2688         res_abort_move(dev, slave, RES_CQ, cqn);
2689         return err;
2690 }
2691
2692 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2693                           struct mlx4_vhcr *vhcr,
2694                           struct mlx4_cmd_mailbox *inbox,
2695                           struct mlx4_cmd_mailbox *outbox,
2696                           struct mlx4_cmd_info *cmd)
2697 {
2698         int err;
2699         int cqn = vhcr->in_modifier;
2700         struct res_cq *cq;
2701
2702         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2703         if (err)
2704                 return err;
2705         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2706         if (err)
2707                 goto out_move;
2708         atomic_dec(&cq->mtt->ref_count);
2709         res_end_move(dev, slave, RES_CQ, cqn);
2710         return 0;
2711
2712 out_move:
2713         res_abort_move(dev, slave, RES_CQ, cqn);
2714         return err;
2715 }
2716
2717 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2718                           struct mlx4_vhcr *vhcr,
2719                           struct mlx4_cmd_mailbox *inbox,
2720                           struct mlx4_cmd_mailbox *outbox,
2721                           struct mlx4_cmd_info *cmd)
2722 {
2723         int cqn = vhcr->in_modifier;
2724         struct res_cq *cq;
2725         int err;
2726
2727         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2728         if (err)
2729                 return err;
2730
2731         if (cq->com.from_state != RES_CQ_HW)
2732                 goto ex_put;
2733
2734         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2735 ex_put:
2736         put_res(dev, slave, cqn, RES_CQ);
2737
2738         return err;
2739 }
2740
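     /*
      * CQ resize (MODIFY_CQ with op_modifier 0) swaps the CQ's MTT:
      * validate that the CQ still owns its original MTT, pin the new
      * range, run the firmware command, and only then move the reference
      * count from the old MTT to the new one.
      */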
2741 static int handle_resize(struct mlx4_dev *dev, int slave,
2742                          struct mlx4_vhcr *vhcr,
2743                          struct mlx4_cmd_mailbox *inbox,
2744                          struct mlx4_cmd_mailbox *outbox,
2745                          struct mlx4_cmd_info *cmd,
2746                          struct res_cq *cq)
2747 {
2748         int err;
2749         struct res_mtt *orig_mtt;
2750         struct res_mtt *mtt;
2751         struct mlx4_cq_context *cqc = inbox->buf;
2752         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2753
2754         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2755         if (err)
2756                 return err;
2757
2758         if (orig_mtt != cq->mtt) {
2759                 err = -EINVAL;
2760                 goto ex_put;
2761         }
2762
2763         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2764         if (err)
2765                 goto ex_put;
2766
2767         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2768         if (err)
2769                 goto ex_put1;
2770         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2771         if (err)
2772                 goto ex_put1;
2773         atomic_dec(&orig_mtt->ref_count);
2774         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2775         atomic_inc(&mtt->ref_count);
2776         cq->mtt = mtt;
2777         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2778         return 0;
2779
2780 ex_put1:
2781         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2782 ex_put:
2783         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2784
2785         return err;
2787 }
2788
2789 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2790                            struct mlx4_vhcr *vhcr,
2791                            struct mlx4_cmd_mailbox *inbox,
2792                            struct mlx4_cmd_mailbox *outbox,
2793                            struct mlx4_cmd_info *cmd)
2794 {
2795         int cqn = vhcr->in_modifier;
2796         struct res_cq *cq;
2797         int err;
2798
2799         err = get_res(dev, slave, cqn, RES_CQ, &cq);
2800         if (err)
2801                 return err;
2802
2803         if (cq->com.from_state != RES_CQ_HW)
2804                 goto ex_put;
2805
2806         if (vhcr->op_modifier == 0) {
2807                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2808                 goto ex_put;
2809         }
2810
2811         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2812 ex_put:
2813         put_res(dev, slave, cqn, RES_CQ);
2814
2815         return err;
2816 }
2817
2818 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2819 {
2820         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2821         int log_rq_stride = srqc->logstride & 7;
2822         int page_shift = (srqc->log_page_size & 0x3f) + 12;
2823
2824         if (log_srq_size + log_rq_stride + 4 < page_shift)
2825                 return 1;
2826
2827         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2828 }
2829
2830 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2831                            struct mlx4_vhcr *vhcr,
2832                            struct mlx4_cmd_mailbox *inbox,
2833                            struct mlx4_cmd_mailbox *outbox,
2834                            struct mlx4_cmd_info *cmd)
2835 {
2836         int err;
2837         int srqn = vhcr->in_modifier;
2838         struct res_mtt *mtt;
2839         struct res_srq *srq;
2840         struct mlx4_srq_context *srqc = inbox->buf;
2841         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2842
2843         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2844                 return -EINVAL;
2845
2846         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2847         if (err)
2848                 return err;
2849         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2850         if (err)
2851                 goto ex_abort;
2852         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2853                               mtt);
2854         if (err)
2855                 goto ex_put_mtt;
2856
2857         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2858         if (err)
2859                 goto ex_put_mtt;
2860
2861         atomic_inc(&mtt->ref_count);
2862         srq->mtt = mtt;
2863         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2864         res_end_move(dev, slave, RES_SRQ, srqn);
2865         return 0;
2866
2867 ex_put_mtt:
2868         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2869 ex_abort:
2870         res_abort_move(dev, slave, RES_SRQ, srqn);
2871
2872         return err;
2873 }
2874
2875 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2876                            struct mlx4_vhcr *vhcr,
2877                            struct mlx4_cmd_mailbox *inbox,
2878                            struct mlx4_cmd_mailbox *outbox,
2879                            struct mlx4_cmd_info *cmd)
2880 {
2881         int err;
2882         int srqn = vhcr->in_modifier;
2883         struct res_srq *srq;
2884
2885         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2886         if (err)
2887                 return err;
2888         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2889         if (err)
2890                 goto ex_abort;
2891         atomic_dec(&srq->mtt->ref_count);
2892         if (srq->cq)
2893                 atomic_dec(&srq->cq->ref_count);
2894         res_end_move(dev, slave, RES_SRQ, srqn);
2895
2896         return 0;
2897
2898 ex_abort:
2899         res_abort_move(dev, slave, RES_SRQ, srqn);
2900
2901         return err;
2902 }
2903
2904 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2905                            struct mlx4_vhcr *vhcr,
2906                            struct mlx4_cmd_mailbox *inbox,
2907                            struct mlx4_cmd_mailbox *outbox,
2908                            struct mlx4_cmd_info *cmd)
2909 {
2910         int err;
2911         int srqn = vhcr->in_modifier;
2912         struct res_srq *srq;
2913
2914         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2915         if (err)
2916                 return err;
2917         if (srq->com.from_state != RES_SRQ_HW) {
2918                 err = -EBUSY;
2919                 goto out;
2920         }
2921         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2922 out:
2923         put_res(dev, slave, srqn, RES_SRQ);
2924         return err;
2925 }
2926
2927 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2928                          struct mlx4_vhcr *vhcr,
2929                          struct mlx4_cmd_mailbox *inbox,
2930                          struct mlx4_cmd_mailbox *outbox,
2931                          struct mlx4_cmd_info *cmd)
2932 {
2933         int err;
2934         int srqn = vhcr->in_modifier;
2935         struct res_srq *srq;
2936
2937         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2938         if (err)
2939                 return err;
2940
2941         if (srq->com.from_state != RES_SRQ_HW) {
2942                 err = -EBUSY;
2943                 goto out;
2944         }
2945
2946         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2947 out:
2948         put_res(dev, slave, srqn, RES_SRQ);
2949         return err;
2950 }
2951
2952 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2953                         struct mlx4_vhcr *vhcr,
2954                         struct mlx4_cmd_mailbox *inbox,
2955                         struct mlx4_cmd_mailbox *outbox,
2956                         struct mlx4_cmd_info *cmd)
2957 {
2958         int err;
2959         int qpn = vhcr->in_modifier & 0x7fffff;
2960         struct res_qp *qp;
2961
2962         err = get_res(dev, slave, qpn, RES_QP, &qp);
2963         if (err)
2964                 return err;
2965         if (qp->com.from_state != RES_QP_HW) {
2966                 err = -EBUSY;
2967                 goto out;
2968         }
2969
2970         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2971 out:
2972         put_res(dev, slave, qpn, RES_QP);
2973         return err;
2974 }
2975
2976 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2977                               struct mlx4_vhcr *vhcr,
2978                               struct mlx4_cmd_mailbox *inbox,
2979                               struct mlx4_cmd_mailbox *outbox,
2980                               struct mlx4_cmd_info *cmd)
2981 {
2982         struct mlx4_qp_context *context = inbox->buf + 8;
2983         adjust_proxy_tun_qkey(dev, vhcr, context);
2984         update_pkey_index(dev, slave, inbox);
2985         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2986 }
2987
2988 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2989                              struct mlx4_vhcr *vhcr,
2990                              struct mlx4_cmd_mailbox *inbox,
2991                              struct mlx4_cmd_mailbox *outbox,
2992                              struct mlx4_cmd_info *cmd)
2993 {
2994         int err;
2995         struct mlx4_qp_context *qpc = inbox->buf + 8;
2996         int qpn = vhcr->in_modifier & 0x7fffff;
2997         struct res_qp *qp;
2998         u8 orig_sched_queue;
2999
3000         err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3001         if (err)
3002                 return err;
3003
3004         update_pkey_index(dev, slave, inbox);
3005         update_gid(dev, inbox, (u8)slave);
3006         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3007         orig_sched_queue = qpc->pri_path.sched_queue;
3008         err = update_vport_qp_param(dev, inbox, slave, qpn);
3009         if (err)
3010                 return err;
3011
3012         err = get_res(dev, slave, qpn, RES_QP, &qp);
3013         if (err)
3014                 return err;
3015         if (qp->com.from_state != RES_QP_HW) {
3016                 err = -EBUSY;
3017                 goto out;
3018         }
3019
3020         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3021 out:
3022         /* if no error, save sched queue value passed in by VF. This is
3023          * essentially the QOS value provided by the VF. This will be useful
3024          * if we allow dynamic changes from VST back to VGT
3025          */
3026         if (!err)
3027                 qp->sched_queue = orig_sched_queue;
3028
3029         put_res(dev, slave, qpn, RES_QP);
3030         return err;
3031 }
3032
3033 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3034                             struct mlx4_vhcr *vhcr,
3035                             struct mlx4_cmd_mailbox *inbox,
3036                             struct mlx4_cmd_mailbox *outbox,
3037                             struct mlx4_cmd_info *cmd)
3038 {
3039         int err;
3040         struct mlx4_qp_context *context = inbox->buf + 8;
3041
3042         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3043         if (err)
3044                 return err;
3045
3046         update_pkey_index(dev, slave, inbox);
3047         update_gid(dev, inbox, (u8)slave);
3048         adjust_proxy_tun_qkey(dev, vhcr, context);
3049         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3050 }
3051
3052 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3053                             struct mlx4_vhcr *vhcr,
3054                             struct mlx4_cmd_mailbox *inbox,
3055                             struct mlx4_cmd_mailbox *outbox,
3056                             struct mlx4_cmd_info *cmd)
3057 {
3058         int err;
3059         struct mlx4_qp_context *context = inbox->buf + 8;
3060
3061         err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3062         if (err)
3063                 return err;
3064
3065         update_pkey_index(dev, slave, inbox);
3066         update_gid(dev, inbox, (u8)slave);
3067         adjust_proxy_tun_qkey(dev, vhcr, context);
3068         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3069 }
3070
3072 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3073                               struct mlx4_vhcr *vhcr,
3074                               struct mlx4_cmd_mailbox *inbox,
3075                               struct mlx4_cmd_mailbox *outbox,
3076                               struct mlx4_cmd_info *cmd)
3077 {
3078         struct mlx4_qp_context *context = inbox->buf + 8;
3079         adjust_proxy_tun_qkey(dev, vhcr, context);
3080         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3081 }
3082
3083 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3084                             struct mlx4_vhcr *vhcr,
3085                             struct mlx4_cmd_mailbox *inbox,
3086                             struct mlx4_cmd_mailbox *outbox,
3087                             struct mlx4_cmd_info *cmd)
3088 {
3089         int err;
3090         struct mlx4_qp_context *context = inbox->buf + 8;
3091
3092         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3093         if (err)
3094                 return err;
3095
3096         adjust_proxy_tun_qkey(dev, vhcr, context);
3097         update_gid(dev, inbox, (u8)slave);
3098         update_pkey_index(dev, slave, inbox);
3099         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3100 }
3101
3102 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3103                             struct mlx4_vhcr *vhcr,
3104                             struct mlx4_cmd_mailbox *inbox,
3105                             struct mlx4_cmd_mailbox *outbox,
3106                             struct mlx4_cmd_info *cmd)
3107 {
3108         int err;
3109         struct mlx4_qp_context *context = inbox->buf + 8;
3110
3111         err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3112         if (err)
3113                 return err;
3114
3115         adjust_proxy_tun_qkey(dev, vhcr, context);
3116         update_gid(dev, inbox, (u8)slave);
3117         update_pkey_index(dev, slave, inbox);
3118         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3119 }
3120
3121 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3122                          struct mlx4_vhcr *vhcr,
3123                          struct mlx4_cmd_mailbox *inbox,
3124                          struct mlx4_cmd_mailbox *outbox,
3125                          struct mlx4_cmd_info *cmd)
3126 {
3127         int err;
3128         int qpn = vhcr->in_modifier & 0x7fffff;
3129         struct res_qp *qp;
3130
3131         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3132         if (err)
3133                 return err;
3134         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3135         if (err)
3136                 goto ex_abort;
3137
3138         atomic_dec(&qp->mtt->ref_count);
3139         atomic_dec(&qp->rcq->ref_count);
3140         atomic_dec(&qp->scq->ref_count);
3141         if (qp->srq)
3142                 atomic_dec(&qp->srq->ref_count);
3143         res_end_move(dev, slave, RES_QP, qpn);
3144         return 0;
3145
3146 ex_abort:
3147         res_abort_move(dev, slave, RES_QP, qpn);
3148
3149         return err;
3150 }
3151
3152 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3153                                 struct res_qp *rqp, u8 *gid)
3154 {
3155         struct res_gid *res;
3156
3157         list_for_each_entry(res, &rqp->mcg_list, list) {
3158                 if (!memcmp(res->gid, gid, 16))
3159                         return res;
3160         }
3161         return NULL;
3162 }
3163
3164 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3165                        u8 *gid, enum mlx4_protocol prot,
3166                        enum mlx4_steer_type steer, u64 reg_id)
3167 {
3168         struct res_gid *res;
3169         int err;
3170
3171         res = kzalloc(sizeof *res, GFP_KERNEL);
3172         if (!res)
3173                 return -ENOMEM;
3174
3175         spin_lock_irq(&rqp->mcg_spl);
3176         if (find_gid(dev, slave, rqp, gid)) {
3177                 kfree(res);
3178                 err = -EEXIST;
3179         } else {
3180                 memcpy(res->gid, gid, 16);
3181                 res->prot = prot;
3182                 res->steer = steer;
3183                 res->reg_id = reg_id;
3184                 list_add_tail(&res->list, &rqp->mcg_list);
3185                 err = 0;
3186         }
3187         spin_unlock_irq(&rqp->mcg_spl);
3188
3189         return err;
3190 }
3191
3192 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3193                        u8 *gid, enum mlx4_protocol prot,
3194                        enum mlx4_steer_type steer, u64 *reg_id)
3195 {
3196         struct res_gid *res;
3197         int err;
3198
3199         spin_lock_irq(&rqp->mcg_spl);
3200         res = find_gid(dev, slave, rqp, gid);
3201         if (!res || res->prot != prot || res->steer != steer)
3202                 err = -EINVAL;
3203         else {
3204                 *reg_id = res->reg_id;
3205                 list_del(&res->list);
3206                 kfree(res);
3207                 err = 0;
3208         }
3209         spin_unlock_irq(&rqp->mcg_spl);
3210
3211         return err;
3212 }
3213
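     /*
      * Attach/detach a QP to a steering entry according to the device's
      * steering mode: device-managed flow steering works through a flow
      * rule identified by reg_id, while B0 steering uses the common QP
      * attach/detach path.
      */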
3214 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3215                      int block_loopback, enum mlx4_protocol prot,
3216                      enum mlx4_steer_type type, u64 *reg_id)
3217 {
3218         switch (dev->caps.steering_mode) {
3219         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3220                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3221                                                 block_loopback, prot,
3222                                                 reg_id);
3223         case MLX4_STEERING_MODE_B0:
3224                 return mlx4_qp_attach_common(dev, qp, gid,
3225                                             block_loopback, prot, type);
3226         default:
3227                 return -EINVAL;
3228         }
3229 }
3230
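     /*
      * Detach a QP from a multicast group; under device-managed flow
      * steering the rule is identified by reg_id, under B0 steering by
      * the gid, protocol and steering type.
      */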
3231 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3232                      enum mlx4_protocol prot, enum mlx4_steer_type type,
3233                      u64 reg_id)
3234 {
3235         switch (dev->caps.steering_mode) {
3236         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3237                 return mlx4_flow_detach(dev, reg_id);
3238         case MLX4_STEERING_MODE_B0:
3239                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3240         default:
3241                 return -EINVAL;
3242         }
3243 }
3244
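     /*
      * Wrapper for the MCAST attach/detach commands issued by a slave:
      * op_modifier selects attach vs. detach, and every attachment is
      * recorded in the resource tracker so it can be torn down if the
      * slave dies.
      */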
3245 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3246                                struct mlx4_vhcr *vhcr,
3247                                struct mlx4_cmd_mailbox *inbox,
3248                                struct mlx4_cmd_mailbox *outbox,
3249                                struct mlx4_cmd_info *cmd)
3250 {
3251         struct mlx4_qp qp; /* dummy for calling attach/detach */
3252         u8 *gid = inbox->buf;
3253         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3254         int err;
3255         int qpn;
3256         struct res_qp *rqp;
3257         u64 reg_id = 0;
3258         int attach = vhcr->op_modifier;
3259         int block_loopback = vhcr->in_modifier >> 31;
3260         u8 steer_type_mask = 2;
3261         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3262
3263         qpn = vhcr->in_modifier & 0xffffff;
3264         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3265         if (err)
3266                 return err;
3267
3268         qp.qpn = qpn;
3269         if (attach) {
3270                 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3271                                 type, &reg_id);
3272                 if (err) {
3273                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3274                         goto ex_put;
3275                 }
3276                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3277                 if (err)
3278                         goto ex_detach;
3279         } else {
3280                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3281                 if (err)
3282                         goto ex_put;
3283
3284                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3285                 if (err)
3286                         pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3287                                qpn, reg_id);
3288         }
3289         put_res(dev, slave, qpn, RES_QP);
3290         return err;
3291
3292 ex_detach:
3293         qp_detach(dev, &qp, gid, prot, type, reg_id);
3294 ex_put:
3295         put_res(dev, slave, qpn, RES_QP);
3296         return err;
3297 }
3298
3299 /*
3300  * MAC validation for Flow Steering rules.
3301  * VF can attach rules only with a mac address which is assigned to it.
3302  */
3303 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3304                                    struct list_head *rlist)
3305 {
3306         struct mac_res *res, *tmp;
3307         __be64 be_mac;
3308
3309         /* Make sure it isn't a multicast or broadcast MAC */
3310         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3311             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3312                 list_for_each_entry_safe(res, tmp, rlist, list) {
3313                         be_mac = cpu_to_be64(res->mac << 16);
3314                         if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3315                                 return 0;
3316                 }
3317                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3318                        eth_header->eth.dst_mac, slave);
3319                 return -EINVAL;
3320         }
3321         return 0;
3322 }
3323
3324 /*
3325  * In case of missing eth header, append eth header with a MAC address
3326  * assigned to the VF.
3327  */
3328 static int add_eth_header(struct mlx4_dev *dev, int slave,
3329                           struct mlx4_cmd_mailbox *inbox,
3330                           struct list_head *rlist, int header_id)
3331 {
3332         struct mac_res *res, *tmp;
3333         u8 port;
3334         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3335         struct mlx4_net_trans_rule_hw_eth *eth_header;
3336         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3337         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3338         __be64 be_mac = 0;
3339         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3340
3341         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3342         port = ctrl->port;
3343         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3344
3345         /* Clear a space in the inbox for eth header */
3346         switch (header_id) {
3347         case MLX4_NET_TRANS_RULE_ID_IPV4:
3348                 ip_header =
3349                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3350                 memmove(ip_header, eth_header,
3351                         sizeof(*ip_header) + sizeof(*l4_header));
3352                 break;
3353         case MLX4_NET_TRANS_RULE_ID_TCP:
3354         case MLX4_NET_TRANS_RULE_ID_UDP:
3355                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3356                             (eth_header + 1);
3357                 memmove(l4_header, eth_header, sizeof(*l4_header));
3358                 break;
3359         default:
3360                 return -EINVAL;
3361         }
3362         list_for_each_entry_safe(res, tmp, rlist, list) {
3363                 if (port == res->port) {
3364                         be_mac = cpu_to_be64(res->mac << 16);
3365                         break;
3366                 }
3367         }
3368         if (!be_mac) {
3369                 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3370                        port);
3371                 return -EINVAL;
3372         }
3373
3374         memset(eth_header, 0, sizeof(*eth_header));
3375         eth_header->size = sizeof(*eth_header) >> 2;
3376         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3377         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3378         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3379
3380         return 0;
3382 }
3383
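     /*
      * Wrapper for flow steering rule attach from a slave. Ethernet rules
      * may only use a MAC that belongs to the slave; L3/L4 rules with no
      * L2 header get one inserted with the slave's MAC before the rule is
      * passed to the device and registered in the resource tracker.
      */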
3384 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3385                                          struct mlx4_vhcr *vhcr,
3386                                          struct mlx4_cmd_mailbox *inbox,
3387                                          struct mlx4_cmd_mailbox *outbox,
3388                                          struct mlx4_cmd_info *cmd)
3389 {
3391         struct mlx4_priv *priv = mlx4_priv(dev);
3392         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3393         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3394         int err;
3395         int qpn;
3396         struct res_qp *rqp;
3397         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3398         struct _rule_hw  *rule_header;
3399         int header_id;
3400
3401         if (dev->caps.steering_mode !=
3402             MLX4_STEERING_MODE_DEVICE_MANAGED)
3403                 return -EOPNOTSUPP;
3404
3405         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3406         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3407         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3408         if (err) {
3409                 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3410                 return err;
3411         }
3412         rule_header = (struct _rule_hw *)(ctrl + 1);
3413         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3414
3415         switch (header_id) {
3416         case MLX4_NET_TRANS_RULE_ID_ETH:
3417                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3418                         err = -EINVAL;
3419                         goto err_put;
3420                 }
3421                 break;
3422         case MLX4_NET_TRANS_RULE_ID_IB:
3423                 break;
3424         case MLX4_NET_TRANS_RULE_ID_IPV4:
3425         case MLX4_NET_TRANS_RULE_ID_TCP:
3426         case MLX4_NET_TRANS_RULE_ID_UDP:
3427                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3428                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3429                         err = -EINVAL;
3430                         goto err_put;
3431                 }
3432                 vhcr->in_modifier +=
3433                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3434                 break;
3435         default:
3436                 pr_err("Corrupted mailbox.\n");
3437                 err = -EINVAL;
3438                 goto err_put;
3439         }
3440
3441         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3442                            vhcr->in_modifier, 0,
3443                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3444                            MLX4_CMD_NATIVE);
3445         if (err)
3446                 goto err_put;
3447
3448         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3449         if (err) {
3450                 mlx4_err(dev, "Failed to add flow steering resources\n");
3451                 /* detach rule */
3452                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3453                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3454                          MLX4_CMD_NATIVE);
3455                 goto err_put;
3456         }
3457         atomic_inc(&rqp->ref_count);
3458 err_put:
3459         put_res(dev, slave, qpn, RES_QP);
3460         return err;
3461 }
3462
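     /*
      * Wrapper for flow steering rule detach from a slave: remove the
      * rule from the resource tracker, issue the detach command, and on
      * success drop the reference held on the rule's QP.
      */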
3463 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3464                                          struct mlx4_vhcr *vhcr,
3465                                          struct mlx4_cmd_mailbox *inbox,
3466                                          struct mlx4_cmd_mailbox *outbox,
3467                                          struct mlx4_cmd_info *cmd)
3468 {
3469         int err;
3470         struct res_qp *rqp;
3471         struct res_fs_rule *rrule;
3472
3473         if (dev->caps.steering_mode !=
3474             MLX4_STEERING_MODE_DEVICE_MANAGED)
3475                 return -EOPNOTSUPP;
3476
3477         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3478         if (err)
3479                 return err;
3480         /* Release the rule from busy state before removal */
3481         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3482         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3483         if (err)
3484                 return err;
3485
3486         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3487         if (err) {
3488                 mlx4_err(dev, "Failed to remove flow steering resources\n");
3489                 goto out;
3490         }
3491
3492         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3493                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3494                        MLX4_CMD_NATIVE);
3495         if (!err)
3496                 atomic_dec(&rqp->ref_count);
3497 out:
3498         put_res(dev, slave, rrule->qpn, RES_QP);
3499         return err;
3500 }
3501
3502 enum {
3503         BUSY_MAX_RETRIES = 10
3504 };
3505
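     /*
      * Wrapper for QUERY_IF_STAT from a slave: hold the counter busy for
      * the duration of the query so it cannot be freed underneath the
      * command.
      */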
3506 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3507                                struct mlx4_vhcr *vhcr,
3508                                struct mlx4_cmd_mailbox *inbox,
3509                                struct mlx4_cmd_mailbox *outbox,
3510                                struct mlx4_cmd_info *cmd)
3511 {
3512         int err;
3513         int index = vhcr->in_modifier & 0xffff;
3514
3515         err = get_res(dev, slave, index, RES_COUNTER, NULL);
3516         if (err)
3517                 return err;
3518
3519         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3520         put_res(dev, slave, index, RES_COUNTER);
3521         return err;
3522 }
3523
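     /*
      * Detach a QP from every multicast group it is still attached to
      * and free the tracking entries; used when cleaning up a slave's
      * resources.
      */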
3524 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3525 {
3526         struct res_gid *rgid;
3527         struct res_gid *tmp;
3528         struct mlx4_qp qp; /* dummy for calling attach/detach */
3529
3530         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3531                 switch (dev->caps.steering_mode) {
3532                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3533                         mlx4_flow_detach(dev, rgid->reg_id);
3534                         break;
3535                 case MLX4_STEERING_MODE_B0:
3536                         qp.qpn = rqp->local_qpn;
3537                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3538                                                      rgid->prot, rgid->steer);
3539                         break;
3540                 }
3541                 list_del(&rgid->list);
3542                 kfree(rgid);
3543         }
3544 }
3545
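     /*
      * Try to claim all of a slave's resources of the given type for
      * removal by moving them to the busy state; returns how many were
      * already busy (still in use) and could not be claimed.
      */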
3546 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3547                           enum mlx4_resource type, int print)
3548 {
3549         struct mlx4_priv *priv = mlx4_priv(dev);
3550         struct mlx4_resource_tracker *tracker =
3551                 &priv->mfunc.master.res_tracker;
3552         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3553         struct res_common *r;
3554         struct res_common *tmp;
3555         int busy;
3556
3557         busy = 0;
3558         spin_lock_irq(mlx4_tlock(dev));
3559         list_for_each_entry_safe(r, tmp, rlist, list) {
3560                 if (r->owner == slave) {
3561                         if (!r->removing) {
3562                                 if (r->state == RES_ANY_BUSY) {
3563                                         if (print)
3564                                                 mlx4_dbg(dev, "%s id 0x%llx is busy\n",
3565                                                          ResourceType(type), r->res_id);
3568                                         ++busy;
3569                                 } else {
3570                                         r->from_state = r->state;
3571                                         r->state = RES_ANY_BUSY;
3572                                         r->removing = 1;
3573                                 }
3574                         }
3575                 }
3576         }
3577         spin_unlock_irq(mlx4_tlock(dev));
3578
3579         return busy;
3580 }
3581
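     /*
      * Retry _move_all_busy() for up to five seconds; if any resources
      * remain busy after that, a final pass with printing enabled logs
      * them.
      */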
3582 static int move_all_busy(struct mlx4_dev *dev, int slave,
3583                          enum mlx4_resource type)
3584 {
3585         unsigned long begin;
3586         int busy;
3587
3588         begin = jiffies;
3589         do {
3590                 busy = _move_all_busy(dev, slave, type, 0);
3591                 if (time_after(jiffies, begin + 5 * HZ))
3592                         break;
3593                 if (busy)
3594                         cond_resched();
3595         } while (busy);
3596
3597         if (busy)
3598                 busy = _move_all_busy(dev, slave, type, 1);
3599
3600         return busy;
3601 }
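
     /*
      * Tear down all QPs owned by a slave, stepping each one down through
      * its tracked states (HW -> MAPPED -> RESERVED) until it is freed.
      * The rem_slave_*() helpers below follow the same pattern for the
      * other resource types.
      */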
3602 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3603 {
3604         struct mlx4_priv *priv = mlx4_priv(dev);
3605         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3606         struct list_head *qp_list =
3607                 &tracker->slave_list[slave].res_list[RES_QP];
3608         struct res_qp *qp;
3609         struct res_qp *tmp;
3610         int state;
3611         u64 in_param;
3612         int qpn;
3613         int err;
3614
3615         err = move_all_busy(dev, slave, RES_QP);
3616         if (err)
3617                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
3618                           slave);
3619
3620         spin_lock_irq(mlx4_tlock(dev));
3621         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3622                 spin_unlock_irq(mlx4_tlock(dev));
3623                 if (qp->com.owner == slave) {
3624                         qpn = qp->com.res_id;
3625                         detach_qp(dev, slave, qp);
3626                         state = qp->com.from_state;
3627                         while (state != 0) {
3628                                 switch (state) {
3629                                 case RES_QP_RESERVED:
3630                                         spin_lock_irq(mlx4_tlock(dev));
3631                                         rb_erase(&qp->com.node,
3632                                                  &tracker->res_tree[RES_QP]);
3633                                         list_del(&qp->com.list);
3634                                         spin_unlock_irq(mlx4_tlock(dev));
3635                                         kfree(qp);
3636                                         state = 0;
3637                                         break;
3638                                 case RES_QP_MAPPED:
3639                                         if (!valid_reserved(dev, slave, qpn))
3640                                                 __mlx4_qp_free_icm(dev, qpn);
3641                                         state = RES_QP_RESERVED;
3642                                         break;
3643                                 case RES_QP_HW:
3644                                         in_param = slave;
3645                                         err = mlx4_cmd(dev, in_param,
3646                                                        qp->local_qpn, 2,
3647                                                        MLX4_CMD_2RST_QP,
3648                                                        MLX4_CMD_TIME_CLASS_A,
3649                                                        MLX4_CMD_NATIVE);
3650                                         if (err)
3651                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
3652                                                          slave, qp->local_qpn);
3655                                         atomic_dec(&qp->rcq->ref_count);
3656                                         atomic_dec(&qp->scq->ref_count);
3657                                         atomic_dec(&qp->mtt->ref_count);
3658                                         if (qp->srq)
3659                                                 atomic_dec(&qp->srq->ref_count);
3660                                         state = RES_QP_MAPPED;
3661                                         break;
3662                                 default:
3663                                         state = 0;
3664                                 }
3665                         }
3666                 }
3667                 spin_lock_irq(mlx4_tlock(dev));
3668         }
3669         spin_unlock_irq(mlx4_tlock(dev));
3670 }
3671
3672 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3673 {
3674         struct mlx4_priv *priv = mlx4_priv(dev);
3675         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3676         struct list_head *srq_list =
3677                 &tracker->slave_list[slave].res_list[RES_SRQ];
3678         struct res_srq *srq;
3679         struct res_srq *tmp;
3680         int state;
3681         u64 in_param;
3683         int srqn;
3684         int err;
3685
3686         err = move_all_busy(dev, slave, RES_SRQ);
3687         if (err)
3688                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
3689                           slave);
3690
3691         spin_lock_irq(mlx4_tlock(dev));
3692         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3693                 spin_unlock_irq(mlx4_tlock(dev));
3694                 if (srq->com.owner == slave) {
3695                         srqn = srq->com.res_id;
3696                         state = srq->com.from_state;
3697                         while (state != 0) {
3698                                 switch (state) {
3699                                 case RES_SRQ_ALLOCATED:
3700                                         __mlx4_srq_free_icm(dev, srqn);
3701                                         spin_lock_irq(mlx4_tlock(dev));
3702                                         rb_erase(&srq->com.node,
3703                                                  &tracker->res_tree[RES_SRQ]);
3704                                         list_del(&srq->com.list);
3705                                         spin_unlock_irq(mlx4_tlock(dev));
3706                                         kfree(srq);
3707                                         state = 0;
3708                                         break;
3709
3710                                 case RES_SRQ_HW:
3711                                         in_param = slave;
3712                                         err = mlx4_cmd(dev, in_param, srqn, 1,
3713                                                        MLX4_CMD_HW2SW_SRQ,
3714                                                        MLX4_CMD_TIME_CLASS_A,
3715                                                        MLX4_CMD_NATIVE);
3716                                         if (err)
3717                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
3718                                                          slave, srqn);
3721
3722                                         atomic_dec(&srq->mtt->ref_count);
3723                                         if (srq->cq)
3724                                                 atomic_dec(&srq->cq->ref_count);
3725                                         state = RES_SRQ_ALLOCATED;
3726                                         break;
3727
3728                                 default:
3729                                         state = 0;
3730                                 }
3731                         }
3732                 }
3733                 spin_lock_irq(mlx4_tlock(dev));
3734         }
3735         spin_unlock_irq(mlx4_tlock(dev));
3736 }
3737
3738 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3739 {
3740         struct mlx4_priv *priv = mlx4_priv(dev);
3741         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3742         struct list_head *cq_list =
3743                 &tracker->slave_list[slave].res_list[RES_CQ];
3744         struct res_cq *cq;
3745         struct res_cq *tmp;
3746         int state;
3747         u64 in_param;
3749         int cqn;
3750         int err;
3751
3752         err = move_all_busy(dev, slave, RES_CQ);
3753         if (err)
3754                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
3755                           slave);
3756
3757         spin_lock_irq(mlx4_tlock(dev));
3758         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3759                 spin_unlock_irq(mlx4_tlock(dev));
3760                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3761                         cqn = cq->com.res_id;
3762                         state = cq->com.from_state;
3763                         while (state != 0) {
3764                                 switch (state) {
3765                                 case RES_CQ_ALLOCATED:
3766                                         __mlx4_cq_free_icm(dev, cqn);
3767                                         spin_lock_irq(mlx4_tlock(dev));
3768                                         rb_erase(&cq->com.node,
3769                                                  &tracker->res_tree[RES_CQ]);
3770                                         list_del(&cq->com.list);
3771                                         spin_unlock_irq(mlx4_tlock(dev));
3772                                         kfree(cq);
3773                                         state = 0;
3774                                         break;
3775
3776                                 case RES_CQ_HW:
3777                                         in_param = slave;
3778                                         err = mlx4_cmd(dev, in_param, cqn, 1,
3779                                                        MLX4_CMD_HW2SW_CQ,
3780                                                        MLX4_CMD_TIME_CLASS_A,
3781                                                        MLX4_CMD_NATIVE);
3782                                         if (err)
3783                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
3784                                                          slave, cqn);
3787                                         atomic_dec(&cq->mtt->ref_count);
3788                                         state = RES_CQ_ALLOCATED;
3789                                         break;
3790
3791                                 default:
3792                                         state = 0;
3793                                 }
3794                         }
3795                 }
3796                 spin_lock_irq(mlx4_tlock(dev));
3797         }
3798         spin_unlock_irq(mlx4_tlock(dev));
3799 }
3800
3801 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3802 {
3803         struct mlx4_priv *priv = mlx4_priv(dev);
3804         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3805         struct list_head *mpt_list =
3806                 &tracker->slave_list[slave].res_list[RES_MPT];
3807         struct res_mpt *mpt;
3808         struct res_mpt *tmp;
3809         int state;
3810         u64 in_param;
3812         int mptn;
3813         int err;
3814
3815         err = move_all_busy(dev, slave, RES_MPT);
3816         if (err)
3817                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
3818                           slave);
3819
3820         spin_lock_irq(mlx4_tlock(dev));
3821         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3822                 spin_unlock_irq(mlx4_tlock(dev));
3823                 if (mpt->com.owner == slave) {
3824                         mptn = mpt->com.res_id;
3825                         state = mpt->com.from_state;
3826                         while (state != 0) {
3827                                 switch (state) {
3828                                 case RES_MPT_RESERVED:
3829                                         __mlx4_mpt_release(dev, mpt->key);
3830                                         spin_lock_irq(mlx4_tlock(dev));
3831                                         rb_erase(&mpt->com.node,
3832                                                  &tracker->res_tree[RES_MPT]);
3833                                         list_del(&mpt->com.list);
3834                                         spin_unlock_irq(mlx4_tlock(dev));
3835                                         kfree(mpt);
3836                                         state = 0;
3837                                         break;
3838
3839                                 case RES_MPT_MAPPED:
3840                                         __mlx4_mpt_free_icm(dev, mpt->key);
3841                                         state = RES_MPT_RESERVED;
3842                                         break;
3843
3844                                 case RES_MPT_HW:
3845                                         in_param = slave;
3846                                         err = mlx4_cmd(dev, in_param, mptn, 0,
3847                                                      MLX4_CMD_HW2SW_MPT,
3848                                                      MLX4_CMD_TIME_CLASS_A,
3849                                                      MLX4_CMD_NATIVE);
3850                                         if (err)
3851                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
3852                                                          slave, mptn);
3855                                         if (mpt->mtt)
3856                                                 atomic_dec(&mpt->mtt->ref_count);
3857                                         state = RES_MPT_MAPPED;
3858                                         break;
3859                                 default:
3860                                         state = 0;
3861                                 }
3862                         }
3863                 }
3864                 spin_lock_irq(mlx4_tlock(dev));
3865         }
3866         spin_unlock_irq(mlx4_tlock(dev));
3867 }
3868
3869 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3870 {
3871         struct mlx4_priv *priv = mlx4_priv(dev);
3872         struct mlx4_resource_tracker *tracker =
3873                 &priv->mfunc.master.res_tracker;
3874         struct list_head *mtt_list =
3875                 &tracker->slave_list[slave].res_list[RES_MTT];
3876         struct res_mtt *mtt;
3877         struct res_mtt *tmp;
3878         int state;
3880         int base;
3881         int err;
3882
3883         err = move_all_busy(dev, slave, RES_MTT);
3884         if (err)
3885                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
3886                           slave);
3887
3888         spin_lock_irq(mlx4_tlock(dev));
3889         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3890                 spin_unlock_irq(mlx4_tlock(dev));
3891                 if (mtt->com.owner == slave) {
3892                         base = mtt->com.res_id;
3893                         state = mtt->com.from_state;
3894                         while (state != 0) {
3895                                 switch (state) {
3896                                 case RES_MTT_ALLOCATED:
3897                                         __mlx4_free_mtt_range(dev, base,
3898                                                               mtt->order);
3899                                         spin_lock_irq(mlx4_tlock(dev));
3900                                         rb_erase(&mtt->com.node,
3901                                                  &tracker->res_tree[RES_MTT]);
3902                                         list_del(&mtt->com.list);
3903                                         spin_unlock_irq(mlx4_tlock(dev));
3904                                         kfree(mtt);
3905                                         state = 0;
3906                                         break;
3907
3908                                 default:
3909                                         state = 0;
3910                                 }
3911                         }
3912                 }
3913                 spin_lock_irq(mlx4_tlock(dev));
3914         }
3915         spin_unlock_irq(mlx4_tlock(dev));
3916 }
3917
3918 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3919 {
3920         struct mlx4_priv *priv = mlx4_priv(dev);
3921         struct mlx4_resource_tracker *tracker =
3922                 &priv->mfunc.master.res_tracker;
3923         struct list_head *fs_rule_list =
3924                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3925         struct res_fs_rule *fs_rule;
3926         struct res_fs_rule *tmp;
3927         int state;
3928         u64 base;
3929         int err;
3930
3931         err = move_all_busy(dev, slave, RES_FS_RULE);
3932         if (err)
3933                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3934                           slave);
3935
3936         spin_lock_irq(mlx4_tlock(dev));
3937         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3938                 spin_unlock_irq(mlx4_tlock(dev));
3939                 if (fs_rule->com.owner == slave) {
3940                         base = fs_rule->com.res_id;
3941                         state = fs_rule->com.from_state;
3942                         while (state != 0) {
3943                                 switch (state) {
3944                                 case RES_FS_RULE_ALLOCATED:
3945                                         /* detach rule */
3946                                         err = mlx4_cmd(dev, base, 0, 0,
3947                                                        MLX4_QP_FLOW_STEERING_DETACH,
3948                                                        MLX4_CMD_TIME_CLASS_A,
3949                                                        MLX4_CMD_NATIVE);
3950
3951                                         spin_lock_irq(mlx4_tlock(dev));
3952                                         rb_erase(&fs_rule->com.node,
3953                                                  &tracker->res_tree[RES_FS_RULE]);
3954                                         list_del(&fs_rule->com.list);
3955                                         spin_unlock_irq(mlx4_tlock(dev));
3956                                         kfree(fs_rule);
3957                                         state = 0;
3958                                         break;
3959
3960                                 default:
3961                                         state = 0;
3962                                 }
3963                         }
3964                 }
3965                 spin_lock_irq(mlx4_tlock(dev));
3966         }
3967         spin_unlock_irq(mlx4_tlock(dev));
3968 }
3969
3970 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3971 {
3972         struct mlx4_priv *priv = mlx4_priv(dev);
3973         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3974         struct list_head *eq_list =
3975                 &tracker->slave_list[slave].res_list[RES_EQ];
3976         struct res_eq *eq;
3977         struct res_eq *tmp;
3978         int err;
3979         int state;
3981         int eqn;
3982         struct mlx4_cmd_mailbox *mailbox;
3983
3984         err = move_all_busy(dev, slave, RES_EQ);
3985         if (err)
3986                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
3987                           slave);
3988
3989         spin_lock_irq(mlx4_tlock(dev));
3990         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3991                 spin_unlock_irq(mlx4_tlock(dev));
3992                 if (eq->com.owner == slave) {
3993                         eqn = eq->com.res_id;
3994                         state = eq->com.from_state;
3995                         while (state != 0) {
3996                                 switch (state) {
3997                                 case RES_EQ_RESERVED:
3998                                         spin_lock_irq(mlx4_tlock(dev));
3999                                         rb_erase(&eq->com.node,
4000                                                  &tracker->res_tree[RES_EQ]);
4001                                         list_del(&eq->com.list);
4002                                         spin_unlock_irq(mlx4_tlock(dev));
4003                                         kfree(eq);
4004                                         state = 0;
4005                                         break;
4006
4007                                 case RES_EQ_HW:
4008                                         mailbox = mlx4_alloc_cmd_mailbox(dev);
4009                                         if (IS_ERR(mailbox)) {
4010                                                 cond_resched();
4011                                                 continue;
4012                                         }
4013                                         err = mlx4_cmd_box(dev, slave, 0,
4014                                                            eqn & 0xff, 0,
4015                                                            MLX4_CMD_HW2SW_EQ,
4016                                                            MLX4_CMD_TIME_CLASS_A,
4017                                                            MLX4_CMD_NATIVE);
4018                                         if (err)
4019                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4020                                                          slave, eqn);
4022                                         mlx4_free_cmd_mailbox(dev, mailbox);
4023                                         atomic_dec(&eq->mtt->ref_count);
4024                                         state = RES_EQ_RESERVED;
4025                                         break;
4026
4027                                 default:
4028                                         state = 0;
4029                                 }
4030                         }
4031                 }
4032                 spin_lock_irq(mlx4_tlock(dev));
4033         }
4034         spin_unlock_irq(mlx4_tlock(dev));
4035 }
4036
4037 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4038 {
4039         struct mlx4_priv *priv = mlx4_priv(dev);
4040         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4041         struct list_head *counter_list =
4042                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4043         struct res_counter *counter;
4044         struct res_counter *tmp;
4045         int err;
4046         int index;
4047
4048         err = move_all_busy(dev, slave, RES_COUNTER);
4049         if (err)
4050                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
4051                           slave);
4052
4053         spin_lock_irq(mlx4_tlock(dev));
4054         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4055                 if (counter->com.owner == slave) {
4056                         index = counter->com.res_id;
4057                         rb_erase(&counter->com.node,
4058                                  &tracker->res_tree[RES_COUNTER]);
4059                         list_del(&counter->com.list);
4060                         kfree(counter);
4061                         __mlx4_counter_free(dev, index);
4062                 }
4063         }
4064         spin_unlock_irq(mlx4_tlock(dev));
4065 }
4066
4067 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4068 {
4069         struct mlx4_priv *priv = mlx4_priv(dev);
4070         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4071         struct list_head *xrcdn_list =
4072                 &tracker->slave_list[slave].res_list[RES_XRCD];
4073         struct res_xrcdn *xrcd;
4074         struct res_xrcdn *tmp;
4075         int err;
4076         int xrcdn;
4077
4078         err = move_all_busy(dev, slave, RES_XRCD);
4079         if (err)
4080                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
4081                           slave);
4082
4083         spin_lock_irq(mlx4_tlock(dev));
4084         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4085                 if (xrcd->com.owner == slave) {
4086                         xrcdn = xrcd->com.res_id;
4087                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4088                         list_del(&xrcd->com.list);
4089                         kfree(xrcd);
4090                         __mlx4_xrcd_free(dev, xrcdn);
4091                 }
4092         }
4093         spin_unlock_irq(mlx4_tlock(dev));
4094 }
4095
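     /*
      * Tear down every tracked resource owned by a slave, in dependency
      * order (QPs before the CQs, SRQs and MTTs they reference), under
      * the slave's resource-tracker mutex.
      */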
4096 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4097 {
4098         struct mlx4_priv *priv = mlx4_priv(dev);
4099
4100         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4101         rem_slave_vlans(dev, slave);
4102         rem_slave_macs(dev, slave);
4103         rem_slave_fs_rule(dev, slave);
4104         rem_slave_qps(dev, slave);
4105         rem_slave_srqs(dev, slave);
4106         rem_slave_cqs(dev, slave);
4107         rem_slave_mrs(dev, slave);
4108         rem_slave_eqs(dev, slave);
4109         rem_slave_mtts(dev, slave);
4110         rem_slave_counters(dev, slave);
4111         rem_slave_xrcdns(dev, slave);
4112         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4113 }
4114
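     /*
      * Workqueue handler for an immediate VLAN change on a VF: rewrite
      * the VLAN index, VLAN control bits and scheduling queue of every
      * eligible QP the slave owns via UPDATE_QP, then unregister the
      * previous VLAN where needed if all updates succeeded.
      */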
4115 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4116 {
4117         struct mlx4_vf_immed_vlan_work *work =
4118                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4119         struct mlx4_cmd_mailbox *mailbox;
4120         struct mlx4_update_qp_context *upd_context;
4121         struct mlx4_dev *dev = &work->priv->dev;
4122         struct mlx4_resource_tracker *tracker =
4123                 &work->priv->mfunc.master.res_tracker;
4124         struct list_head *qp_list =
4125                 &tracker->slave_list[work->slave].res_list[RES_QP];
4126         struct res_qp *qp;
4127         struct res_qp *tmp;
4128         u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4129                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4130                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4131                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4132                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4133                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4134                        (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4135                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4136
4137         int err;
4138         int port, errors = 0;
4139         u8 vlan_control;
4140
4141         if (mlx4_is_slave(dev)) {
4142                 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4143                           work->slave);
4144                 goto out;
4145         }
4146
4147         mailbox = mlx4_alloc_cmd_mailbox(dev);
4148         if (IS_ERR(mailbox))
4149                 goto out;
4150         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4151                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4152                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4153                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4154                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4155                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4156                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4157         else if (!work->vlan_id)
4158                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4159                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4160         else
4161                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4162                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4163                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4164
4165         upd_context = mailbox->buf;
4166         upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4167         upd_context->qp_context.pri_path.vlan_control = vlan_control;
4168         upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4169
4170         spin_lock_irq(mlx4_tlock(dev));
4171         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4172                 spin_unlock_irq(mlx4_tlock(dev));
4173                 if (qp->com.owner == work->slave) {
4174                         if (qp->com.from_state != RES_QP_HW ||
4175                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
4176                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4177                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4178                                 spin_lock_irq(mlx4_tlock(dev));
4179                                 continue;
4180                         }
4181                         port = (qp->sched_queue >> 6 & 1) + 1;
4182                         if (port != work->port) {
4183                                 spin_lock_irq(mlx4_tlock(dev));
4184                                 continue;
4185                         }
4186                         upd_context->qp_context.pri_path.sched_queue =
4187                                 qp->sched_queue & 0xC7;
4188                         upd_context->qp_context.pri_path.sched_queue |=
4189                                 ((work->qos & 0x7) << 3);
4190
4191                         err = mlx4_cmd(dev, mailbox->dma,
4192                                        qp->local_qpn & 0xffffff,
4193                                        0, MLX4_CMD_UPDATE_QP,
4194                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4195                         if (err) {
4196                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4197                                           work->slave, port, qp->local_qpn, err);
4200                                 errors++;
4201                         }
4202                 }
4203                 spin_lock_irq(mlx4_tlock(dev));
4204         }
4205         spin_unlock_irq(mlx4_tlock(dev));
4206         mlx4_free_cmd_mailbox(dev, mailbox);
4207
4208         if (errors)
4209                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4210                          errors, work->slave, work->port);
4211
4212         /* unregister previous vlan_id if needed and we had no errors
4213          * while updating the QPs
4214          */
4215         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4216             work->orig_vlan_ix != NO_INDX)
4217                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4218                                        work->orig_vlan_id);
4219 out:
4220         kfree(work);
4222 }