drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT       2
#define MLX4_VF_COUNTERS_PER_PORT       1

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
        const char              *func_name;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
        /* VF DMFS mbox with port flipped */
        void                    *mirr_mbox;
        /* > 0 --> apply mirror when getting into HA mode      */
        /* = 0 --> un-apply mirror when getting out of HA mode */
        u32                     mirr_mbox_size;
        struct list_head        mirr_list;
        u64                     mirr_rule_id;
};

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = rb_entry(node, struct res_common,
                                                  node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = rb_entry(*new, struct res_common,
                                                   node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
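
/* The tracker keeps one rb-tree per resource type, keyed by res_id.
 * res_tracker_lookup() and res_tracker_insert() above are the standard
 * kernel rb-tree walk/link pattern: lookup descends left or right on
 * the key; insert remembers the parent and link slot, then calls
 * rb_link_node() and rb_insert_color() to rebalance.  Duplicate ids are
 * rejected with -EEXIST rather than silently replaced.
 */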

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EDQUOT;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
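
/* Accounting example (illustrative numbers, not from any real device):
 * a VF with guaranteed = 4 and allocated = 2 asks for count = 5.  The
 * first 2 units come out of its remaining guarantee (from_rsvd = 2) and
 * the other 3 from the shared pool (from_free = 3); the request is
 * granted only if free - 3 >= reserved, i.e. the shared pool is never
 * allowed to dip into the guarantees still owed to other functions.
 */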

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
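
/* Quota example (illustrative, ignoring integer rounding): with
 * num_instances = 1000 and num_vfs = 4, each function gets
 * guaranteed = 1000 / (2 * 5) = 100 and quota = 500 + 100 = 600.  Half
 * of the pool is thus split evenly into hard guarantees, and any single
 * function may consume at most its guarantee plus the entire shared
 * half.  For the PF, res_free (and, for MTTs, the guarantee and quota)
 * is grown by the firmware-reserved entries.
 */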

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
{
        /* reduce the sink counter */
        return (dev->caps.max_counters - 1 -
                (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
                / MLX4_MAX_PORTS;
}
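
/* E.g., assuming MLX4_MAX_PORTS == 2 and a device with
 * dev->caps.max_counters == 128 (illustrative value):
 * (128 - 1 (sink) - 2 * 2 (PF)) / 2 = 61, so slave indices up to 61 can
 * each be guaranteed one counter per port; higher-numbered VFs fall
 * back to the shared pool (see the RES_COUNTER case in
 * mlx4_init_resource_tracker() below).
 */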

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
        int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->guaranteed[t] =
                                                MLX4_PF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else if (t <= max_vfs_guarantee_counter)
                                        res_alloc->guaranteed[t] =
                                                MLX4_VF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
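
/* In the command mailbox the QP context starts at offset 8, which puts
 * pri_path.sched_queue at byte 64 and the pkey index at byte 35; bit 6
 * of sched_queue selects the physical port, hence
 * port = (sched >> 6 & 1) + 1.  update_pkey_index() above rewrites the
 * slave's virtual pkey index to the physical one via virt2phys_pkey.
 */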

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
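
/* On a multi-function device each slave owns a disjoint window of the
 * physical GID table, so the mgid_index a slave writes into its QP
 * context is rewritten here: shifted by mlx4_get_base_gid_ix() on
 * Ethernet ports, or derived from the slave id on IB ports.  This keeps
 * one function from resolving paths through another function's GIDs.
 */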

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force vlan stripping by clearing vsd; an MLX QP here
                 * refers to Raw Ethernet
                 */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                /* preserve IF_COUNTER flag */
                qpc->pri_path.vlan_control &=
                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
                                /* vst QinQ should block untagged on TX, but
                                 * cvlan is in payload and phv is set so the
                                 * hw sees it as untagged. Block tagged instead.
                                 */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        } else { /* vst 802.1Q */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        }
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
                if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
                        qpc->pri_path.fl |= MLX4_FL_SV;
                else
                        qpc->pri_path.fl |= MLX4_FL_CV;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
        switch (t) {
        case RES_QP:
                return "QP";
        case RES_CQ:
                return "CQ";
        case RES_SRQ:
                return "SRQ";
        case RES_XRCD:
                return "XRCD";
        case RES_MPT:
                return "MPT";
        case RES_MTT:
                return "MTT";
        case RES_MAC:
                return "MAC";
        case RES_VLAN:
                return "VLAN";
        case RES_COUNTER:
                return "COUNTER";
        case RES_FS_RULE:
                return "FS_RULE";
        case RES_EQ:
                return "EQ";
        default:
                return "INVALID RESOURCE";
        }
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}
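
/* get_res()/put_res() implement a simple busy-bit protocol on tracked
 * resources: _get_res() marks the entry RES_ANY_BUSY (remembering the
 * previous state in from_state and the caller in func_name for the
 * "already taken by" diagnostic below), and put_res() restores
 * from_state.  Only the owning slave may take a resource.
 */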
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type,
                    void *res, const char *func_name)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                mlx4_warn(dev,
                          "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
                          func_name, slave, res_id, mlx4_resource_type_to_str(type),
                          r->func_name);
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
        r->func_name = func_name;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

#define get_res(dev, slave, res_id, type, res) \
        _get_res((dev), (slave), (res_id), (type), (res), __func__)

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r) {
                r->state = r->from_state;
                r->func_name = "";
        }
        spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
                                   int counter_index)
{
        struct res_common *r;
        struct res_counter *counter;
        int ret = 0;

        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
                return ret;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, counter_index, RES_COUNTER);
        if (!r || r->owner != slave) {
                ret = -EINVAL;
        } else {
                counter = container_of(r, struct res_counter, com);
                if (!counter->port)
                        counter->port = port;
        }

        spin_unlock_irq(mlx4_tlock(dev));
        return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
                                     struct mlx4_qp_context *qpc, u8 slave,
                                     int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (port == counter->port) {
                        qpc->pri_path.counter_index = counter->com.res_id;
                        spin_unlock_irq(mlx4_tlock(dev));
                        return 0;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        /* No existing counter, need to allocate a new counter */
        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
                                port);
        if (err == -ENOENT) {
                err = 0;
        } else if (err && err != -ENOSPC) {
                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
                         __func__, slave, err);
        } else {
                qpc->pri_path.counter_index = counter_idx;
                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
                         __func__, slave, qpc->pri_path.counter_index);
                err = 0;
        }

        return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port)
{
        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
                return handle_existing_counter(dev, slave, port,
                                               qpc->pri_path.counter_index);

        return handle_unexisting_counter(dev, qpc, slave, port);
}
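
/* Counter assignment policy, as implemented above: if the QP context
 * already names a real (non-sink) counter it must belong to this slave
 * and gets bound to the port on first use; otherwise an existing
 * per-port counter of the slave is reused, and only if none exists is a
 * fresh one reserved, falling back silently to the sink counter when
 * the pool is exhausted.
 */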

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id, extra);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
                          struct mlx4_counter *data)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        int *counters_arr;
        int i = 0, err = 0;

        memset(data, 0, sizeof(*data));

        counters_arr = kmalloc_array(dev->caps.max_counters,
                                     sizeof(*counters_arr), GFP_KERNEL);
        if (!counters_arr)
                return -ENOMEM;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (counter->port == port) {
                        counters_arr[i] = (int)tmp->res_id;
                        i++;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
        counters_arr[i] = -1;

        i = 0;

        while (counters_arr[i] != -1) {
                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
                                             0);
                if (err) {
                        memset(data, 0, sizeof(*data));
                        goto table_changed;
                }
                i++;
        }

table_changed:
        kfree(counters_arr);
        return 0;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del_init(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
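
/* add_res_range() is transactional: all tracker entries for the range
 * are allocated outside the lock first, then inserted under mlx4_tlock;
 * any collision (-EEXIST) unwinds every insertion already made, so a
 * range is either tracked completely or not at all.
 */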
1323
1324 static int remove_qp_ok(struct res_qp *res)
1325 {
1326         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1327             !list_empty(&res->mcg_list)) {
1328                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1329                        res->com.state, atomic_read(&res->ref_count));
1330                 return -EBUSY;
1331         } else if (res->com.state != RES_QP_RESERVED) {
1332                 return -EPERM;
1333         }
1334
1335         return 0;
1336 }
1337
1338 static int remove_mtt_ok(struct res_mtt *res, int order)
1339 {
1340         if (res->com.state == RES_MTT_BUSY ||
1341             atomic_read(&res->ref_count)) {
1342                 pr_devel("%s-%d: state %s, ref_count %d\n",
1343                          __func__, __LINE__,
1344                          mtt_states_str(res->com.state),
1345                          atomic_read(&res->ref_count));
1346                 return -EBUSY;
1347         } else if (res->com.state != RES_MTT_ALLOCATED)
1348                 return -EPERM;
1349         else if (res->order != order)
1350                 return -EINVAL;
1351
1352         return 0;
1353 }
1354
1355 static int remove_mpt_ok(struct res_mpt *res)
1356 {
1357         if (res->com.state == RES_MPT_BUSY)
1358                 return -EBUSY;
1359         else if (res->com.state != RES_MPT_RESERVED)
1360                 return -EPERM;
1361
1362         return 0;
1363 }
1364
1365 static int remove_eq_ok(struct res_eq *res)
1366 {
1367         if (res->com.state == RES_MPT_BUSY)
1368                 return -EBUSY;
1369         else if (res->com.state != RES_MPT_RESERVED)
1370                 return -EPERM;
1371
1372         return 0;
1373 }
1374
1375 static int remove_counter_ok(struct res_counter *res)
1376 {
1377         if (res->com.state == RES_COUNTER_BUSY)
1378                 return -EBUSY;
1379         else if (res->com.state != RES_COUNTER_ALLOCATED)
1380                 return -EPERM;
1381
1382         return 0;
1383 }
1384
1385 static int remove_xrcdn_ok(struct res_xrcdn *res)
1386 {
1387         if (res->com.state == RES_XRCD_BUSY)
1388                 return -EBUSY;
1389         else if (res->com.state != RES_XRCD_ALLOCATED)
1390                 return -EPERM;
1391
1392         return 0;
1393 }
1394
1395 static int remove_fs_rule_ok(struct res_fs_rule *res)
1396 {
1397         if (res->com.state == RES_FS_RULE_BUSY)
1398                 return -EBUSY;
1399         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1400                 return -EPERM;
1401
1402         return 0;
1403 }
1404
1405 static int remove_cq_ok(struct res_cq *res)
1406 {
1407         if (res->com.state == RES_CQ_BUSY)
1408                 return -EBUSY;
1409         else if (res->com.state != RES_CQ_ALLOCATED)
1410                 return -EPERM;
1411
1412         return 0;
1413 }
1414
1415 static int remove_srq_ok(struct res_srq *res)
1416 {
1417         if (res->com.state == RES_SRQ_BUSY)
1418                 return -EBUSY;
1419         else if (res->com.state != RES_SRQ_ALLOCATED)
1420                 return -EPERM;
1421
1422         return 0;
1423 }
1424
1425 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1426 {
1427         switch (type) {
1428         case RES_QP:
1429                 return remove_qp_ok((struct res_qp *)res);
1430         case RES_CQ:
1431                 return remove_cq_ok((struct res_cq *)res);
1432         case RES_SRQ:
1433                 return remove_srq_ok((struct res_srq *)res);
1434         case RES_MPT:
1435                 return remove_mpt_ok((struct res_mpt *)res);
1436         case RES_MTT:
1437                 return remove_mtt_ok((struct res_mtt *)res, extra);
1438         case RES_MAC:
1439                 return -EOPNOTSUPP;
1440         case RES_EQ:
1441                 return remove_eq_ok((struct res_eq *)res);
1442         case RES_COUNTER:
1443                 return remove_counter_ok((struct res_counter *)res);
1444         case RES_XRCD:
1445                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1446         case RES_FS_RULE:
1447                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1448         default:
1449                 return -EINVAL;
1450         }
1451 }
1452
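     /*
      * Remove a range of resources of one type from the tracker.  Two
      * passes are made under the tracker lock: the first verifies that
      * every entry exists, belongs to @slave and passes remove_ok(), the
      * second erases and frees them, so the removal is all-or-nothing.
      */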
1453 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1454                          enum mlx4_resource type, int extra)
1455 {
1456         u64 i;
1457         int err;
1458         struct mlx4_priv *priv = mlx4_priv(dev);
1459         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1460         struct res_common *r;
1461
1462         spin_lock_irq(mlx4_tlock(dev));
1463         for (i = base; i < base + count; ++i) {
1464                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1465                 if (!r) {
1466                         err = -ENOENT;
1467                         goto out;
1468                 }
1469                 if (r->owner != slave) {
1470                         err = -EPERM;
1471                         goto out;
1472                 }
1473                 err = remove_ok(r, type, extra);
1474                 if (err)
1475                         goto out;
1476         }
1477
1478         for (i = base; i < base + count; ++i) {
1479                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1480                 rb_erase(&r->node, &tracker->res_tree[type]);
1481                 list_del(&r->list);
1482                 kfree(r);
1483         }
1484         err = 0;
1485
1486 out:
1487         spin_unlock_irq(mlx4_tlock(dev));
1488
1489         return err;
1490 }
1491
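     /*
      * Begin a QP state transition.  The requested transition is
      * validated against the current state; on success the entry is
      * parked in RES_QP_BUSY with from_state/to_state recorded, and the
      * caller later commits via res_end_move() or rolls back via
      * res_abort_move().
      */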
1492 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1493                                 enum res_qp_states state, struct res_qp **qp,
1494                                 int alloc)
1495 {
1496         struct mlx4_priv *priv = mlx4_priv(dev);
1497         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1498         struct res_qp *r;
1499         int err = 0;
1500
1501         spin_lock_irq(mlx4_tlock(dev));
1502         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1503         if (!r)
1504                 err = -ENOENT;
1505         else if (r->com.owner != slave)
1506                 err = -EPERM;
1507         else {
1508                 switch (state) {
1509                 case RES_QP_BUSY:
1510                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1511                                  __func__, r->com.res_id);
1512                         err = -EBUSY;
1513                         break;
1514
1515                 case RES_QP_RESERVED:
1516                         if (r->com.state == RES_QP_MAPPED && !alloc)
1517                                 break;
1518
1519                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1520                         err = -EINVAL;
1521                         break;
1522
1523                 case RES_QP_MAPPED:
1524                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1525                             r->com.state == RES_QP_HW)
1526                                 break;
1527                         else {
1528                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1529                                           r->com.res_id);
1530                                 err = -EINVAL;
1531                         }
1532
1533                         break;
1534
1535                 case RES_QP_HW:
1536                         if (r->com.state != RES_QP_MAPPED)
1537                                 err = -EINVAL;
1538                         break;
1539                 default:
1540                         err = -EINVAL;
1541                 }
1542
1543                 if (!err) {
1544                         r->com.from_state = r->com.state;
1545                         r->com.to_state = state;
1546                         r->com.state = RES_QP_BUSY;
1547                         if (qp)
1548                                 *qp = r;
1549                 }
1550         }
1551
1552         spin_unlock_irq(mlx4_tlock(dev));
1553
1554         return err;
1555 }
1556
1557 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1558                                 enum res_mpt_states state, struct res_mpt **mpt)
1559 {
1560         struct mlx4_priv *priv = mlx4_priv(dev);
1561         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1562         struct res_mpt *r;
1563         int err = 0;
1564
1565         spin_lock_irq(mlx4_tlock(dev));
1566         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1567         if (!r)
1568                 err = -ENOENT;
1569         else if (r->com.owner != slave)
1570                 err = -EPERM;
1571         else {
1572                 switch (state) {
1573                 case RES_MPT_BUSY:
1574                         err = -EINVAL;
1575                         break;
1576
1577                 case RES_MPT_RESERVED:
1578                         if (r->com.state != RES_MPT_MAPPED)
1579                                 err = -EINVAL;
1580                         break;
1581
1582                 case RES_MPT_MAPPED:
1583                         if (r->com.state != RES_MPT_RESERVED &&
1584                             r->com.state != RES_MPT_HW)
1585                                 err = -EINVAL;
1586                         break;
1587
1588                 case RES_MPT_HW:
1589                         if (r->com.state != RES_MPT_MAPPED)
1590                                 err = -EINVAL;
1591                         break;
1592                 default:
1593                         err = -EINVAL;
1594                 }
1595
1596                 if (!err) {
1597                         r->com.from_state = r->com.state;
1598                         r->com.to_state = state;
1599                         r->com.state = RES_MPT_BUSY;
1600                         if (mpt)
1601                                 *mpt = r;
1602                 }
1603         }
1604
1605         spin_unlock_irq(mlx4_tlock(dev));
1606
1607         return err;
1608 }
1609
1610 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1611                                 enum res_eq_states state, struct res_eq **eq)
1612 {
1613         struct mlx4_priv *priv = mlx4_priv(dev);
1614         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1615         struct res_eq *r;
1616         int err = 0;
1617
1618         spin_lock_irq(mlx4_tlock(dev));
1619         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1620         if (!r)
1621                 err = -ENOENT;
1622         else if (r->com.owner != slave)
1623                 err = -EPERM;
1624         else {
1625                 switch (state) {
1626                 case RES_EQ_BUSY:
1627                         err = -EINVAL;
1628                         break;
1629
1630                 case RES_EQ_RESERVED:
1631                         if (r->com.state != RES_EQ_HW)
1632                                 err = -EINVAL;
1633                         break;
1634
1635                 case RES_EQ_HW:
1636                         if (r->com.state != RES_EQ_RESERVED)
1637                                 err = -EINVAL;
1638                         break;
1639
1640                 default:
1641                         err = -EINVAL;
1642                 }
1643
1644                 if (!err) {
1645                         r->com.from_state = r->com.state;
1646                         r->com.to_state = state;
1647                         r->com.state = RES_EQ_BUSY;
1648                 }
1649         }
1650
1651         spin_unlock_irq(mlx4_tlock(dev));
1652
1653         if (!err && eq)
1654                 *eq = r;
1655
1656         return err;
1657 }
1658
1659 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1660                                 enum res_cq_states state, struct res_cq **cq)
1661 {
1662         struct mlx4_priv *priv = mlx4_priv(dev);
1663         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1664         struct res_cq *r;
1665         int err;
1666
1667         spin_lock_irq(mlx4_tlock(dev));
1668         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1669         if (!r) {
1670                 err = -ENOENT;
1671         } else if (r->com.owner != slave) {
1672                 err = -EPERM;
1673         } else if (state == RES_CQ_ALLOCATED) {
1674                 if (r->com.state != RES_CQ_HW)
1675                         err = -EINVAL;
1676                 else if (atomic_read(&r->ref_count))
1677                         err = -EBUSY;
1678                 else
1679                         err = 0;
1680         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1681                 err = -EINVAL;
1682         } else {
1683                 err = 0;
1684         }
1685
1686         if (!err) {
1687                 r->com.from_state = r->com.state;
1688                 r->com.to_state = state;
1689                 r->com.state = RES_CQ_BUSY;
1690                 if (cq)
1691                         *cq = r;
1692         }
1693
1694         spin_unlock_irq(mlx4_tlock(dev));
1695
1696         return err;
1697 }
1698
1699 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1700                                  enum res_srq_states state, struct res_srq **srq)
1701 {
1702         struct mlx4_priv *priv = mlx4_priv(dev);
1703         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1704         struct res_srq *r;
1705         int err = 0;
1706
1707         spin_lock_irq(mlx4_tlock(dev));
1708         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1709         if (!r) {
1710                 err = -ENOENT;
1711         } else if (r->com.owner != slave) {
1712                 err = -EPERM;
1713         } else if (state == RES_SRQ_ALLOCATED) {
1714                 if (r->com.state != RES_SRQ_HW)
1715                         err = -EINVAL;
1716                 else if (atomic_read(&r->ref_count))
1717                         err = -EBUSY;
1718         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1719                 err = -EINVAL;
1720         }
1721
1722         if (!err) {
1723                 r->com.from_state = r->com.state;
1724                 r->com.to_state = state;
1725                 r->com.state = RES_SRQ_BUSY;
1726                 if (srq)
1727                         *srq = r;
1728         }
1729
1730         spin_unlock_irq(mlx4_tlock(dev));
1731
1732         return err;
1733 }
1734
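     /*
      * Roll back or commit a transition started by one of the
      * *_res_start_move_to() helpers: abort restores from_state, end
      * installs to_state.
      */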
1735 static void res_abort_move(struct mlx4_dev *dev, int slave,
1736                            enum mlx4_resource type, int id)
1737 {
1738         struct mlx4_priv *priv = mlx4_priv(dev);
1739         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1740         struct res_common *r;
1741
1742         spin_lock_irq(mlx4_tlock(dev));
1743         r = res_tracker_lookup(&tracker->res_tree[type], id);
1744         if (r && (r->owner == slave))
1745                 r->state = r->from_state;
1746         spin_unlock_irq(mlx4_tlock(dev));
1747 }
1748
1749 static void res_end_move(struct mlx4_dev *dev, int slave,
1750                          enum mlx4_resource type, int id)
1751 {
1752         struct mlx4_priv *priv = mlx4_priv(dev);
1753         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1754         struct res_common *r;
1755
1756         spin_lock_irq(mlx4_tlock(dev));
1757         r = res_tracker_lookup(&tracker->res_tree[type], id);
1758         if (r && (r->owner == slave))
1759                 r->state = r->to_state;
1760         spin_unlock_irq(mlx4_tlock(dev));
1761 }
1762
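     /*
      * valid_reserved(): may @slave track this reserved QPN (the master,
      * or a guest proxy QP belonging to that slave)?  fw_reserved():
      * QPNs below the firmware region boundary have no driver-managed
      * ICM.
      */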
1763 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1764 {
1765         return mlx4_is_qp_reserved(dev, qpn) &&
1766                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1767 }
1768
1769 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1770 {
1771         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1772 }
1773
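     /*
      * ALLOC_RES for QPs: RES_OP_RESERVE checks the slave's quota and
      * reserves a QPN range; RES_OP_MAP_ICM maps ICM for a single QPN
      * (skipped for firmware-reserved QPNs) and moves it to
      * RES_QP_MAPPED.
      */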
1774 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1775                         u64 in_param, u64 *out_param)
1776 {
1777         int err;
1778         int count;
1779         int align;
1780         int base;
1781         int qpn;
1782         u8 flags;
1783
1784         switch (op) {
1785         case RES_OP_RESERVE:
1786                 count = get_param_l(&in_param) & 0xffffff;
1787                 /* Turn off all unsupported QP allocation flags that the
1788                  * slave tries to set.
1789                  */
1790                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1791                 align = get_param_h(&in_param);
1792                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1793                 if (err)
1794                         return err;
1795
1796                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1797                 if (err) {
1798                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1799                         return err;
1800                 }
1801
1802                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1803                 if (err) {
1804                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1805                         __mlx4_qp_release_range(dev, base, count);
1806                         return err;
1807                 }
1808                 set_param_l(out_param, base);
1809                 break;
1810         case RES_OP_MAP_ICM:
1811                 qpn = get_param_l(&in_param) & 0x7fffff;
1812                 if (valid_reserved(dev, slave, qpn)) {
1813                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1814                         if (err)
1815                                 return err;
1816                 }
1817
1818                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1819                                            NULL, 1);
1820                 if (err)
1821                         return err;
1822
1823                 if (!fw_reserved(dev, qpn)) {
1824                         err = __mlx4_qp_alloc_icm(dev, qpn);
1825                         if (err) {
1826                                 res_abort_move(dev, slave, RES_QP, qpn);
1827                                 return err;
1828                         }
1829                 }
1830
1831                 res_end_move(dev, slave, RES_QP, qpn);
1832                 break;
1833
1834         default:
1835                 err = -EINVAL;
1836                 break;
1837         }
1838         return err;
1839 }
1840
1841 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1842                          u64 in_param, u64 *out_param)
1843 {
1844         int err = -EINVAL;
1845         int base;
1846         int order;
1847
1848         if (op != RES_OP_RESERVE_AND_MAP)
1849                 return err;
1850
1851         order = get_param_l(&in_param);
1852
1853         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1854         if (err)
1855                 return err;
1856
1857         base = __mlx4_alloc_mtt_range(dev, order);
1858         if (base == -1) {
1859                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1860                 return -ENOMEM;
1861         }
1862
1863         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1864         if (err) {
1865                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1866                 __mlx4_free_mtt_range(dev, base, order);
1867         } else {
1868                 set_param_l(out_param, base);
1869         }
1870
1871         return err;
1872 }
1873
1874 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1875                          u64 in_param, u64 *out_param)
1876 {
1877         int err = -EINVAL;
1878         int index;
1879         int id;
1880         struct res_mpt *mpt;
1881
1882         switch (op) {
1883         case RES_OP_RESERVE:
1884                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1885                 if (err)
1886                         break;
1887
1888                 index = __mlx4_mpt_reserve(dev);
1889                 if (index == -1) {
1890                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1891                         break;
1892                 }
1893                 id = index & mpt_mask(dev);
1894
1895                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1896                 if (err) {
1897                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1898                         __mlx4_mpt_release(dev, index);
1899                         break;
1900                 }
1901                 set_param_l(out_param, index);
1902                 break;
1903         case RES_OP_MAP_ICM:
1904                 index = get_param_l(&in_param);
1905                 id = index & mpt_mask(dev);
1906                 err = mr_res_start_move_to(dev, slave, id,
1907                                            RES_MPT_MAPPED, &mpt);
1908                 if (err)
1909                         return err;
1910
1911                 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1912                 if (err) {
1913                         res_abort_move(dev, slave, RES_MPT, id);
1914                         return err;
1915                 }
1916
1917                 res_end_move(dev, slave, RES_MPT, id);
1918                 break;
1919         }
1920         return err;
1921 }
1922
1923 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1924                         u64 in_param, u64 *out_param)
1925 {
1926         int cqn;
1927         int err;
1928
1929         switch (op) {
1930         case RES_OP_RESERVE_AND_MAP:
1931                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1932                 if (err)
1933                         break;
1934
1935                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1936                 if (err) {
1937                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1938                         break;
1939                 }
1940
1941                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1942                 if (err) {
1943                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1944                         __mlx4_cq_free_icm(dev, cqn);
1945                         break;
1946                 }
1947
1948                 set_param_l(out_param, cqn);
1949                 break;
1950
1951         default:
1952                 err = -EINVAL;
1953         }
1954
1955         return err;
1956 }
1957
1958 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1959                          u64 in_param, u64 *out_param)
1960 {
1961         int srqn;
1962         int err;
1963
1964         switch (op) {
1965         case RES_OP_RESERVE_AND_MAP:
1966                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1967                 if (err)
1968                         break;
1969
1970                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1971                 if (err) {
1972                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1973                         break;
1974                 }
1975
1976                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1977                 if (err) {
1978                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1979                         __mlx4_srq_free_icm(dev, srqn);
1980                         break;
1981                 }
1982
1983                 set_param_l(out_param, srqn);
1984                 break;
1985
1986         default:
1987                 err = -EINVAL;
1988         }
1989
1990         return err;
1991 }
1992
1993 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1994                                      u8 smac_index, u64 *mac)
1995 {
1996         struct mlx4_priv *priv = mlx4_priv(dev);
1997         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1998         struct list_head *mac_list =
1999                 &tracker->slave_list[slave].res_list[RES_MAC];
2000         struct mac_res *res, *tmp;
2001
2002         list_for_each_entry_safe(res, tmp, mac_list, list) {
2003                 if (res->smac_index == smac_index && res->port == (u8) port) {
2004                         *mac = res->mac;
2005                         return 0;
2006                 }
2007         }
2008         return -ENOENT;
2009 }
2010
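     /*
      * Account a MAC registration to @slave: bump the reference count if
      * the (mac, port) pair is already on the slave's list, otherwise
      * charge the slave's MAC quota and add a new entry.
      */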
2011 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac,
                                 int port, u8 smac_index)
2012 {
2013         struct mlx4_priv *priv = mlx4_priv(dev);
2014         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2015         struct list_head *mac_list =
2016                 &tracker->slave_list[slave].res_list[RES_MAC];
2017         struct mac_res *res, *tmp;
2018
2019         list_for_each_entry_safe(res, tmp, mac_list, list) {
2020                 if (res->mac == mac && res->port == (u8) port) {
2021                         /* mac found. update ref count */
2022                         ++res->ref_count;
2023                         return 0;
2024                 }
2025         }
2026
2027         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2028                 return -EINVAL;
2029         res = kzalloc(sizeof(*res), GFP_KERNEL);
2030         if (!res) {
2031                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2032                 return -ENOMEM;
2033         }
2034         res->mac = mac;
2035         res->port = (u8) port;
2036         res->smac_index = smac_index;
2037         res->ref_count = 1;
2038         list_add_tail(&res->list,
2039                       &tracker->slave_list[slave].res_list[RES_MAC]);
2040         return 0;
2041 }
2042
2043 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2044                                int port)
2045 {
2046         struct mlx4_priv *priv = mlx4_priv(dev);
2047         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2048         struct list_head *mac_list =
2049                 &tracker->slave_list[slave].res_list[RES_MAC];
2050         struct mac_res *res, *tmp;
2051
2052         list_for_each_entry_safe(res, tmp, mac_list, list) {
2053                 if (res->mac == mac && res->port == (u8) port) {
2054                         if (!--res->ref_count) {
2055                                 list_del(&res->list);
2056                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2057                                 kfree(res);
2058                         }
2059                         break;
2060                 }
2061         }
2062 }
2063
2064 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2065 {
2066         struct mlx4_priv *priv = mlx4_priv(dev);
2067         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2068         struct list_head *mac_list =
2069                 &tracker->slave_list[slave].res_list[RES_MAC];
2070         struct mac_res *res, *tmp;
2071         int i;
2072
2073         list_for_each_entry_safe(res, tmp, mac_list, list) {
2074                 list_del(&res->list);
2075                 /* dereference the mac the number of times the slave referenced it */
2076                 for (i = 0; i < res->ref_count; i++)
2077                         __mlx4_unregister_mac(dev, res->port, res->mac);
2078                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2079                 kfree(res);
2080         }
2081 }
2082
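     /*
      * ALLOC_RES for MACs: register the MAC in hardware on the slave's
      * behalf, return the SMAC index through out_param, and record the
      * registration on the slave's list for accounting and cleanup.
      */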
2083 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2084                          u64 in_param, u64 *out_param, int in_port)
2085 {
2086         int err = -EINVAL;
2087         int port;
2088         u64 mac;
2089         u8 smac_index;
2090
2091         if (op != RES_OP_RESERVE_AND_MAP)
2092                 return err;
2093
2094         port = !in_port ? get_param_l(out_param) : in_port;
2095         port = mlx4_slave_convert_port(dev, slave, port);
2097
2098         if (port < 0)
2099                 return -EINVAL;
2100         mac = in_param;
2101
2102         err = __mlx4_register_mac(dev, port, mac);
2103         if (err >= 0) {
2104                 smac_index = err;
2105                 set_param_l(out_param, err);
2106                 err = 0;
2107         }
2108
2109         if (!err) {
2110                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2111                 if (err)
2112                         __mlx4_unregister_mac(dev, port, mac);
2113         }
2114         return err;
2115 }
2116
2117 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2118                              int port, int vlan_index)
2119 {
2120         struct mlx4_priv *priv = mlx4_priv(dev);
2121         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2122         struct list_head *vlan_list =
2123                 &tracker->slave_list[slave].res_list[RES_VLAN];
2124         struct vlan_res *res, *tmp;
2125
2126         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2127                 if (res->vlan == vlan && res->port == (u8) port) {
2128                         /* vlan found. update ref count */
2129                         ++res->ref_count;
2130                         return 0;
2131                 }
2132         }
2133
2134         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2135                 return -EINVAL;
2136         res = kzalloc(sizeof(*res), GFP_KERNEL);
2137         if (!res) {
2138                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2139                 return -ENOMEM;
2140         }
2141         res->vlan = vlan;
2142         res->port = (u8) port;
2143         res->vlan_index = vlan_index;
2144         res->ref_count = 1;
2145         list_add_tail(&res->list,
2146                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2147         return 0;
2148 }
2149
2151 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2152                                 int port)
2153 {
2154         struct mlx4_priv *priv = mlx4_priv(dev);
2155         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2156         struct list_head *vlan_list =
2157                 &tracker->slave_list[slave].res_list[RES_VLAN];
2158         struct vlan_res *res, *tmp;
2159
2160         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2161                 if (res->vlan == vlan && res->port == (u8) port) {
2162                         if (!--res->ref_count) {
2163                                 list_del(&res->list);
2164                                 mlx4_release_resource(dev, slave, RES_VLAN,
2165                                                       1, port);
2166                                 kfree(res);
2167                         }
2168                         break;
2169                 }
2170         }
2171 }
2172
2173 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2174 {
2175         struct mlx4_priv *priv = mlx4_priv(dev);
2176         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2177         struct list_head *vlan_list =
2178                 &tracker->slave_list[slave].res_list[RES_VLAN];
2179         struct vlan_res *res, *tmp;
2180         int i;
2181
2182         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2183                 list_del(&res->list);
2184                 /* dereference the vlan the number of times the slave referenced it */
2185                 for (i = 0; i < res->ref_count; i++)
2186                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2187                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2188                 kfree(res);
2189         }
2190 }
2191
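     /*
      * ALLOC_RES for VLANs.  A request that carries no port in the
      * in_modifier comes from an old guest driver, for which vlan
      * registration was a NOP; such slaves are flagged via old_vlan_api
      * and the request succeeds without registering anything.
      */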
2192 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2193                           u64 in_param, u64 *out_param, int in_port)
2194 {
2195         struct mlx4_priv *priv = mlx4_priv(dev);
2196         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2197         int err;
2198         u16 vlan;
2199         int vlan_index;
2200         int port;
2201
2202         port = !in_port ? get_param_l(out_param) : in_port;
2203
2204         if (!port || op != RES_OP_RESERVE_AND_MAP)
2205                 return -EINVAL;
2206
2207         port = mlx4_slave_convert_port(dev, slave, port);
2209
2210         if (port < 0)
2211                 return -EINVAL;
2212         /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2213         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2214                 slave_state[slave].old_vlan_api = true;
2215                 return 0;
2216         }
2217
2218         vlan = (u16) in_param;
2219
2220         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2221         if (!err) {
2222                 set_param_l(out_param, (u32) vlan_index);
2223                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2224                 if (err)
2225                         __mlx4_unregister_vlan(dev, port, vlan);
2226         }
2227         return err;
2228 }
2229
2230 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2231                              u64 in_param, u64 *out_param, int port)
2232 {
2233         u32 index;
2234         int err;
2235
2236         if (op != RES_OP_RESERVE)
2237                 return -EINVAL;
2238
2239         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2240         if (err)
2241                 return err;
2242
2243         err = __mlx4_counter_alloc(dev, &index);
2244         if (err) {
2245                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2246                 return err;
2247         }
2248
2249         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2250         if (err) {
2251                 __mlx4_counter_free(dev, index);
2252                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2253         } else {
2254                 set_param_l(out_param, index);
2255         }
2256
2257         return err;
2258 }
2259
2260 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2261                            u64 in_param, u64 *out_param)
2262 {
2263         u32 xrcdn;
2264         int err;
2265
2266         if (op != RES_OP_RESERVE)
2267                 return -EINVAL;
2268
2269         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2270         if (err)
2271                 return err;
2272
2273         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2274         if (err)
2275                 __mlx4_xrcd_free(dev, xrcdn);
2276         else
2277                 set_param_l(out_param, xrcdn);
2278
2279         return err;
2280 }
2281
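     /*
      * Dispatch an ALLOC_RES command from a slave: the low byte of the
      * in_modifier selects the resource type and the op_modifier selects
      * the operation; for MACs and VLANs the next byte of the
      * in_modifier carries the port.
      */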
2282 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2283                            struct mlx4_vhcr *vhcr,
2284                            struct mlx4_cmd_mailbox *inbox,
2285                            struct mlx4_cmd_mailbox *outbox,
2286                            struct mlx4_cmd_info *cmd)
2287 {
2288         int err;
2289         int alop = vhcr->op_modifier;
2290
2291         switch (vhcr->in_modifier & 0xFF) {
2292         case RES_QP:
2293                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2294                                    vhcr->in_param, &vhcr->out_param);
2295                 break;
2296
2297         case RES_MTT:
2298                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2299                                     vhcr->in_param, &vhcr->out_param);
2300                 break;
2301
2302         case RES_MPT:
2303                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2304                                     vhcr->in_param, &vhcr->out_param);
2305                 break;
2306
2307         case RES_CQ:
2308                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2309                                    vhcr->in_param, &vhcr->out_param);
2310                 break;
2311
2312         case RES_SRQ:
2313                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2314                                     vhcr->in_param, &vhcr->out_param);
2315                 break;
2316
2317         case RES_MAC:
2318                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2319                                     vhcr->in_param, &vhcr->out_param,
2320                                     (vhcr->in_modifier >> 8) & 0xFF);
2321                 break;
2322
2323         case RES_VLAN:
2324                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2325                                      vhcr->in_param, &vhcr->out_param,
2326                                      (vhcr->in_modifier >> 8) & 0xFF);
2327                 break;
2328
2329         case RES_COUNTER:
2330                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2331                                         vhcr->in_param, &vhcr->out_param, 0);
2332                 break;
2333
2334         case RES_XRCD:
2335                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2336                                       vhcr->in_param, &vhcr->out_param);
2337                 break;
2338
2339         default:
2340                 err = -EINVAL;
2341                 break;
2342         }
2343
2344         return err;
2345 }
2346
2347 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2348                        u64 in_param)
2349 {
2350         int err;
2351         int count;
2352         int base;
2353         int qpn;
2354
2355         switch (op) {
2356         case RES_OP_RESERVE:
2357                 base = get_param_l(&in_param) & 0x7fffff;
2358                 count = get_param_h(&in_param);
2359                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2360                 if (err)
2361                         break;
2362                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2363                 __mlx4_qp_release_range(dev, base, count);
2364                 break;
2365         case RES_OP_MAP_ICM:
2366                 qpn = get_param_l(&in_param) & 0x7fffff;
2367                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2368                                            NULL, 0);
2369                 if (err)
2370                         return err;
2371
2372                 if (!fw_reserved(dev, qpn))
2373                         __mlx4_qp_free_icm(dev, qpn);
2374
2375                 res_end_move(dev, slave, RES_QP, qpn);
2376
2377                 if (valid_reserved(dev, slave, qpn))
2378                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2379                 break;
2380         default:
2381                 err = -EINVAL;
2382                 break;
2383         }
2384         return err;
2385 }
2386
2387 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2388                         u64 in_param, u64 *out_param)
2389 {
2390         int err = -EINVAL;
2391         int base;
2392         int order;
2393
2394         if (op != RES_OP_RESERVE_AND_MAP)
2395                 return err;
2396
2397         base = get_param_l(&in_param);
2398         order = get_param_h(&in_param);
2399         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2400         if (!err) {
2401                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2402                 __mlx4_free_mtt_range(dev, base, order);
2403         }
2404         return err;
2405 }
2406
2407 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2408                         u64 in_param)
2409 {
2410         int err = -EINVAL;
2411         int index;
2412         int id;
2413         struct res_mpt *mpt;
2414
2415         switch (op) {
2416         case RES_OP_RESERVE:
2417                 index = get_param_l(&in_param);
2418                 id = index & mpt_mask(dev);
2419                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2420                 if (err)
2421                         break;
2422                 index = mpt->key;
2423                 put_res(dev, slave, id, RES_MPT);
2424
2425                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2426                 if (err)
2427                         break;
2428                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2429                 __mlx4_mpt_release(dev, index);
2430                 break;
2431         case RES_OP_MAP_ICM:
2432                 index = get_param_l(&in_param);
2433                 id = index & mpt_mask(dev);
2434                 err = mr_res_start_move_to(dev, slave, id,
2435                                            RES_MPT_RESERVED, &mpt);
2436                 if (err)
2437                         return err;
2438
2439                 __mlx4_mpt_free_icm(dev, mpt->key);
2440                 res_end_move(dev, slave, RES_MPT, id);
2441                 break;
2442         default:
2443                 err = -EINVAL;
2444                 break;
2445         }
2446         return err;
2447 }
2448
2449 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2450                        u64 in_param, u64 *out_param)
2451 {
2452         int cqn;
2453         int err;
2454
2455         switch (op) {
2456         case RES_OP_RESERVE_AND_MAP:
2457                 cqn = get_param_l(&in_param);
2458                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2459                 if (err)
2460                         break;
2461
2462                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2463                 __mlx4_cq_free_icm(dev, cqn);
2464                 break;
2465
2466         default:
2467                 err = -EINVAL;
2468                 break;
2469         }
2470
2471         return err;
2472 }
2473
2474 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2475                         u64 in_param, u64 *out_param)
2476 {
2477         int srqn;
2478         int err;
2479
2480         switch (op) {
2481         case RES_OP_RESERVE_AND_MAP:
2482                 srqn = get_param_l(&in_param);
2483                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2484                 if (err)
2485                         break;
2486
2487                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2488                 __mlx4_srq_free_icm(dev, srqn);
2489                 break;
2490
2491         default:
2492                 err = -EINVAL;
2493                 break;
2494         }
2495
2496         return err;
2497 }
2498
2499 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2500                             u64 in_param, u64 *out_param, int in_port)
2501 {
2502         int port;
2503         int err = 0;
2504
2505         switch (op) {
2506         case RES_OP_RESERVE_AND_MAP:
2507                 port = !in_port ? get_param_l(out_param) : in_port;
2508                 port = mlx4_slave_convert_port(dev, slave, port);
2510
2511                 if (port < 0)
2512                         return -EINVAL;
2513                 mac_del_from_slave(dev, slave, in_param, port);
2514                 __mlx4_unregister_mac(dev, port, in_param);
2515                 break;
2516         default:
2517                 err = -EINVAL;
2518                 break;
2519         }
2520
2521         return err;
2523 }
2524
2525 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2526                             u64 in_param, u64 *out_param, int port)
2527 {
2528         struct mlx4_priv *priv = mlx4_priv(dev);
2529         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2530         int err = 0;
2531
2532         port = mlx4_slave_convert_port(dev, slave, port);
2534
2535         if (port < 0)
2536                 return -EINVAL;
2537         switch (op) {
2538         case RES_OP_RESERVE_AND_MAP:
2539                 if (slave_state[slave].old_vlan_api)
2540                         return 0;
2541                 if (!port)
2542                         return -EINVAL;
2543                 vlan_del_from_slave(dev, slave, in_param, port);
2544                 __mlx4_unregister_vlan(dev, port, in_param);
2545                 break;
2546         default:
2547                 err = -EINVAL;
2548                 break;
2549         }
2550
2551         return err;
2552 }
2553
2554 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2555                             u64 in_param, u64 *out_param)
2556 {
2557         int index;
2558         int err;
2559
2560         if (op != RES_OP_RESERVE)
2561                 return -EINVAL;
2562
2563         index = get_param_l(&in_param);
2564         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2565                 return 0;
2566
2567         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2568         if (err)
2569                 return err;
2570
2571         __mlx4_counter_free(dev, index);
2572         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2573
2574         return err;
2575 }
2576
2577 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2578                           u64 in_param, u64 *out_param)
2579 {
2580         int xrcdn;
2581         int err;
2582
2583         if (op != RES_OP_RESERVE)
2584                 return -EINVAL;
2585
2586         xrcdn = get_param_l(&in_param);
2587         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2588         if (err)
2589                 return err;
2590
2591         __mlx4_xrcd_free(dev, xrcdn);
2592
2593         return err;
2594 }
2595
2596 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2597                           struct mlx4_vhcr *vhcr,
2598                           struct mlx4_cmd_mailbox *inbox,
2599                           struct mlx4_cmd_mailbox *outbox,
2600                           struct mlx4_cmd_info *cmd)
2601 {
2602         int err = -EINVAL;
2603         int alop = vhcr->op_modifier;
2604
2605         switch (vhcr->in_modifier & 0xFF) {
2606         case RES_QP:
2607                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2608                                   vhcr->in_param);
2609                 break;
2610
2611         case RES_MTT:
2612                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2613                                    vhcr->in_param, &vhcr->out_param);
2614                 break;
2615
2616         case RES_MPT:
2617                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2618                                    vhcr->in_param);
2619                 break;
2620
2621         case RES_CQ:
2622                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2623                                   vhcr->in_param, &vhcr->out_param);
2624                 break;
2625
2626         case RES_SRQ:
2627                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2628                                    vhcr->in_param, &vhcr->out_param);
2629                 break;
2630
2631         case RES_MAC:
2632                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2633                                    vhcr->in_param, &vhcr->out_param,
2634                                    (vhcr->in_modifier >> 8) & 0xFF);
2635                 break;
2636
2637         case RES_VLAN:
2638                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2639                                     vhcr->in_param, &vhcr->out_param,
2640                                     (vhcr->in_modifier >> 8) & 0xFF);
2641                 break;
2642
2643         case RES_COUNTER:
2644                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2645                                        vhcr->in_param, &vhcr->out_param);
2646                 break;
2647
2648         case RES_XRCD:
2649                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2650                                      vhcr->in_param, &vhcr->out_param);
2651                                      vhcr->in_param, &vhcr->out_param);
                     break;
2652
2653                 break;
2654         }
2655         return err;
2656 }
2657
2658 /* ugly but other choices are uglier */
2659 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2660 {
2661         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2662 }
2663
2664 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2665 {
2666         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2667 }
2668
2669 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2670 {
2671         return be32_to_cpu(mpt->mtt_sz);
2672 }
2673
2674 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2675 {
2676         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2677 }
2678
2679 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2680 {
2681         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2682 }
2683
2684 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2685 {
2686         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2687 }
2688
2689 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2690 {
2691         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2692 }
2693
2694 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2695 {
2696         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2697 }
2698
2699 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2700 {
2701         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2702 }
2703
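     /*
      * Compute how many MTT pages a QP needs from its context: SQ bytes
      * plus RQ bytes (the RQ contributes nothing for SRQ-attached, RSS
      * or XRC QPs), adjusted for the page offset and rounded up to a
      * power-of-two page count.
      */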
2704 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2705 {
2706         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2707         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2708         int log_sq_stride = qpc->sq_size_stride & 7;
2709         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2710         int log_rq_stride = qpc->rq_size_stride & 7;
2711         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2712         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2713         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2714         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2715         int sq_size;
2716         int rq_size;
2717         int total_pages;
2718         int total_mem;
2719         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2720
2721         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2722         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2723         total_mem = sq_size + rq_size;
2724         total_pages =
2725                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2726                                    page_shift);
2727
2728         return total_pages;
2729 }
2730
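     /*
      * Verify that the MTT window [start, start + size) requested by a
      * command lies entirely within the MTT range the slave owns.
      */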
2731 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2732                            int size, struct res_mtt *mtt)
2733 {
2734         int res_start = mtt->com.res_id;
2735         int res_size = (1 << mtt->order);
2736
2737         if (start < res_start || start + size > res_start + res_size)
2738                 return -EPERM;
2739         return 0;
2740 }
2741
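     /*
      * SW2HW_MPT on behalf of a slave: reject memory windows, FMRs with
      * bind enabled, and PD fields encoding another slave's function id,
      * validate the MTT range for non-physical MPTs, then hand the
      * command to firmware.
      */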
2742 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2743                            struct mlx4_vhcr *vhcr,
2744                            struct mlx4_cmd_mailbox *inbox,
2745                            struct mlx4_cmd_mailbox *outbox,
2746                            struct mlx4_cmd_info *cmd)
2747 {
2748         int err;
2749         int index = vhcr->in_modifier;
2750         struct res_mtt *mtt;
2751         struct res_mpt *mpt = NULL;
2752         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2753         int phys;
2754         int id;
2755         u32 pd;
2756         int pd_slave;
2757
2758         id = index & mpt_mask(dev);
2759         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2760         if (err)
2761                 return err;
2762
2763         /* Disable memory windows for VFs. */
2764         if (!mr_is_region(inbox->buf)) {
2765                 err = -EPERM;
2766                 goto ex_abort;
2767         }
2768
2769         /* Make sure that the PD bits related to the slave id are zeros. */
2770         pd = mr_get_pd(inbox->buf);
2771         pd_slave = (pd >> 17) & 0x7f;
2772         if (pd_slave != 0 && --pd_slave != slave) {
2773                 err = -EPERM;
2774                 goto ex_abort;
2775         }
2776
2777         if (mr_is_fmr(inbox->buf)) {
2778                 /* FMR and Bind Enable are forbidden in slave devices. */
2779                 if (mr_is_bind_enabled(inbox->buf)) {
2780                         err = -EPERM;
2781                         goto ex_abort;
2782                 }
2783                 /* FMR and Memory Windows are also forbidden. */
2784                 if (!mr_is_region(inbox->buf)) {
2785                         err = -EPERM;
2786                         goto ex_abort;
2787                 }
2788         }
2789
2790         phys = mr_phys_mpt(inbox->buf);
2791         if (!phys) {
2792                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2793                 if (err)
2794                         goto ex_abort;
2795
2796                 err = check_mtt_range(dev, slave, mtt_base,
2797                                       mr_get_mtt_size(inbox->buf), mtt);
2798                 if (err)
2799                         goto ex_put;
2800
2801                 mpt->mtt = mtt;
2802         }
2803
2804         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2805         if (err)
2806                 goto ex_put;
2807
2808         if (!phys) {
2809                 atomic_inc(&mtt->ref_count);
2810                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2811         }
2812
2813         res_end_move(dev, slave, RES_MPT, id);
2814         return 0;
2815
2816 ex_put:
2817         if (!phys)
2818                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2819 ex_abort:
2820         res_abort_move(dev, slave, RES_MPT, id);
2821
2822         return err;
2823 }
2824
2825 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2826                            struct mlx4_vhcr *vhcr,
2827                            struct mlx4_cmd_mailbox *inbox,
2828                            struct mlx4_cmd_mailbox *outbox,
2829                            struct mlx4_cmd_info *cmd)
2830 {
2831         int err;
2832         int index = vhcr->in_modifier;
2833         struct res_mpt *mpt;
2834         int id;
2835
2836         id = index & mpt_mask(dev);
2837         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2838         if (err)
2839                 return err;
2840
2841         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2842         if (err)
2843                 goto ex_abort;
2844
2845         if (mpt->mtt)
2846                 atomic_dec(&mpt->mtt->ref_count);
2847
2848         res_end_move(dev, slave, RES_MPT, id);
2849         return 0;
2850
2851 ex_abort:
2852         res_abort_move(dev, slave, RES_MPT, id);
2853
2854         return err;
2855 }
2856
2857 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2858                            struct mlx4_vhcr *vhcr,
2859                            struct mlx4_cmd_mailbox *inbox,
2860                            struct mlx4_cmd_mailbox *outbox,
2861                            struct mlx4_cmd_info *cmd)
2862 {
2863         int err;
2864         int index = vhcr->in_modifier;
2865         struct res_mpt *mpt;
2866         int id;
2867
2868         id = index & mpt_mask(dev);
2869         err = get_res(dev, slave, id, RES_MPT, &mpt);
2870         if (err)
2871                 return err;
2872
2873         if (mpt->com.from_state == RES_MPT_MAPPED) {
2874                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2875                  * that, the VF must read the MPT. But since the MPT entry memory is not
2876                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2877                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2878                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2879                  * ownership following the change. The change here allows the VF to
2880                  * perform QUERY_MPT also when the entry is in SW ownership.
2881                  */
2882                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2883                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2884                                         mpt->key, NULL);
2885
2886                 if (!mpt_entry || !outbox->buf) {
2887                         err = -EINVAL;
2888                         goto out;
2889                 }
2890
2891                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2892
2893                 err = 0;
2894         } else if (mpt->com.from_state == RES_MPT_HW) {
2895                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2896         } else {
2897                 err = -EBUSY;
2898                 goto out;
2899         }
2900
2902 out:
2903         put_res(dev, slave, id, RES_MPT);
2904         return err;
2905 }
2906
2907 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2908 {
2909         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2910 }
2911
2912 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2913 {
2914         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2915 }
2916
2917 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2918 {
2919         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2920 }
2921
2922 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2923                                   struct mlx4_qp_context *context)
2924 {
2925         u32 qpn = vhcr->in_modifier & 0xffffff;
2926         u32 qkey = 0;
2927
2928         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2929                 return;
2930
2931         /* adjust qkey in qp context */
2932         context->qkey = cpu_to_be32(qkey);
2933 }
2934
2935 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2936                                  struct mlx4_qp_context *qpc,
2937                                  struct mlx4_cmd_mailbox *inbox);
2938
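     /*
      * RST2INIT: look up and validate everything the QP context
      * references (MTT range, receive and send CQs, optional SRQ), pass
      * the command to firmware, and on success take a reference on each
      * object so it cannot be freed while the QP exists in hardware.
      */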
2939 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2940                              struct mlx4_vhcr *vhcr,
2941                              struct mlx4_cmd_mailbox *inbox,
2942                              struct mlx4_cmd_mailbox *outbox,
2943                              struct mlx4_cmd_info *cmd)
2944 {
2945         int err;
2946         int qpn = vhcr->in_modifier & 0x7fffff;
2947         struct res_mtt *mtt;
2948         struct res_qp *qp;
2949         struct mlx4_qp_context *qpc = inbox->buf + 8;
2950         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2951         int mtt_size = qp_get_mtt_size(qpc);
2952         struct res_cq *rcq;
2953         struct res_cq *scq;
2954         int rcqn = qp_get_rcqn(qpc);
2955         int scqn = qp_get_scqn(qpc);
2956         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2957         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2958         struct res_srq *srq;
2959         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2960
2961         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2962         if (err)
2963                 return err;
2964
2965         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2966         if (err)
2967                 return err;
2968         qp->local_qpn = local_qpn;
2969         qp->sched_queue = 0;
2970         qp->param3 = 0;
2971         qp->vlan_control = 0;
2972         qp->fvl_rx = 0;
2973         qp->pri_path_fl = 0;
2974         qp->vlan_index = 0;
2975         qp->feup = 0;
2976         qp->qpc_flags = be32_to_cpu(qpc->flags);
2977
2978         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2979         if (err)
2980                 goto ex_abort;
2981
2982         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2983         if (err)
2984                 goto ex_put_mtt;
2985
2986         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2987         if (err)
2988                 goto ex_put_mtt;
2989
2990         if (scqn != rcqn) {
2991                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2992                 if (err)
2993                         goto ex_put_rcq;
2994         } else
2995                 scq = rcq;
2996
2997         if (use_srq) {
2998                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2999                 if (err)
3000                         goto ex_put_scq;
3001         }
3002
3003         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3004         update_pkey_index(dev, slave, inbox);
3005         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3006         if (err)
3007                 goto ex_put_srq;
3008         atomic_inc(&mtt->ref_count);
3009         qp->mtt = mtt;
3010         atomic_inc(&rcq->ref_count);
3011         qp->rcq = rcq;
3012         atomic_inc(&scq->ref_count);
3013         qp->scq = scq;
3014
3015         if (scqn != rcqn)
3016                 put_res(dev, slave, scqn, RES_CQ);
3017
3018         if (use_srq) {
3019                 atomic_inc(&srq->ref_count);
3020                 put_res(dev, slave, srqn, RES_SRQ);
3021                 qp->srq = srq;
3022         }
3023
3024         /* Save param3 for dynamic changes from VST back to VGT */
3025         qp->param3 = qpc->param3;
3026         put_res(dev, slave, rcqn, RES_CQ);
3027         put_res(dev, slave, mtt_base, RES_MTT);
3028         res_end_move(dev, slave, RES_QP, qpn);
3029
3030         return 0;
3031
3032 ex_put_srq:
3033         if (use_srq)
3034                 put_res(dev, slave, srqn, RES_SRQ);
3035 ex_put_scq:
3036         if (scqn != rcqn)
3037                 put_res(dev, slave, scqn, RES_CQ);
3038 ex_put_rcq:
3039         put_res(dev, slave, rcqn, RES_CQ);
3040 ex_put_mtt:
3041         put_res(dev, slave, mtt_base, RES_MTT);
3042 ex_abort:
3043         res_abort_move(dev, slave, RES_QP, qpn);
3044
3045         return err;
3046 }
3047
3048 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3049 {
3050         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3051 }
3052
3053 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3054 {
3055         int log_eq_size = eqc->log_eq_size & 0x1f;
3056         int page_shift = (eqc->log_page_size & 0x3f) + 12;
3057
3058         if (log_eq_size + 5 < page_shift)
3059                 return 1;
3060
3061         return 1 << (log_eq_size + 5 - page_shift);
3062 }
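/* Worked example (hypothetical values): log_eq_size = 10 means 1024 EQEs
 * of 32 bytes each (hence the "+ 5"), i.e. 2^15 bytes; with 4KB pages
 * (log_page_size = 0, so page_shift = 12) that is 1 << (10 + 5 - 12) = 8
 * MTT entries. EQs smaller than one page are clamped to a single entry.
 */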
3063
3064 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3065 {
3066         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3067 }
3068
3069 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3070 {
3071         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3072         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3073
3074         if (log_cq_size + 5 < page_shift)
3075                 return 1;
3076
3077         return 1 << (log_cq_size + 5 - page_shift);
3078 }
3079
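/* EQ numbers are per slave, so the tracker keys them with a composite id,
 * res_id = (slave << 10) | eqn; the same encoding is used by the
 * HW2SW_EQ/QUERY_EQ wrappers and mlx4_GEN_EQE below.
 */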
3080 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3081                           struct mlx4_vhcr *vhcr,
3082                           struct mlx4_cmd_mailbox *inbox,
3083                           struct mlx4_cmd_mailbox *outbox,
3084                           struct mlx4_cmd_info *cmd)
3085 {
3086         int err;
3087         int eqn = vhcr->in_modifier;
3088         int res_id = (slave << 10) | eqn;
3089         struct mlx4_eq_context *eqc = inbox->buf;
3090         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3091         int mtt_size = eq_get_mtt_size(eqc);
3092         struct res_eq *eq;
3093         struct res_mtt *mtt;
3094
3095         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3096         if (err)
3097                 return err;
3098         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3099         if (err)
3100                 goto out_add;
3101
3102         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3103         if (err)
3104                 goto out_move;
3105
3106         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3107         if (err)
3108                 goto out_put;
3109
3110         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3111         if (err)
3112                 goto out_put;
3113
3114         atomic_inc(&mtt->ref_count);
3115         eq->mtt = mtt;
3116         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3117         res_end_move(dev, slave, RES_EQ, res_id);
3118         return 0;
3119
3120 out_put:
3121         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3122 out_move:
3123         res_abort_move(dev, slave, RES_EQ, res_id);
3124 out_add:
3125         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3126         return err;
3127 }
3128
3129 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3130                             struct mlx4_vhcr *vhcr,
3131                             struct mlx4_cmd_mailbox *inbox,
3132                             struct mlx4_cmd_mailbox *outbox,
3133                             struct mlx4_cmd_info *cmd)
3134 {
3135         int err;
3136         u8 get = vhcr->op_modifier;
3137
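        /* Slaves may only issue the "get" form of CONFIG_DEV
         * (op_modifier == 1); changing device configuration is PF-only.
         */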
3138         if (get != 1)
3139                 return -EPERM;
3140
3141         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3142
3143         return err;
3144 }
3145
3146 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3147                               int len, struct res_mtt **res)
3148 {
3149         struct mlx4_priv *priv = mlx4_priv(dev);
3150         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3151         struct res_mtt *mtt;
3152         int err = -EINVAL;
3153
3154         spin_lock_irq(mlx4_tlock(dev));
3155         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3156                             com.list) {
3157                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3158                         *res = mtt;
3159                         mtt->com.from_state = mtt->com.state;
3160                         mtt->com.state = RES_MTT_BUSY;
3161                         err = 0;
3162                         break;
3163                 }
3164         }
3165         spin_unlock_irq(mlx4_tlock(dev));
3166
3167         return err;
3168 }
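/* Note: on success the matching MTT is returned marked RES_MTT_BUSY (its
 * previous state saved in from_state), so the caller must release it with
 * put_res() when done, as mlx4_WRITE_MTT_wrapper does below.
 */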
3169
3170 static int verify_qp_parameters(struct mlx4_dev *dev,
3171                                 struct mlx4_vhcr *vhcr,
3172                                 struct mlx4_cmd_mailbox *inbox,
3173                                 enum qp_transition transition, u8 slave)
3174 {
3175         u32                     qp_type;
3176         u32                     qpn;
3177         struct mlx4_qp_context  *qp_ctx;
3178         enum mlx4_qp_optpar     optpar;
3179         int port;
3180         int num_gids;
3181
3182         qp_ctx  = inbox->buf + 8;
3183         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3184         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3185
3186         if (slave != mlx4_master_func_num(dev)) {
3187                 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
3188                 /* setting QP rate-limit is disallowed for VFs */
3189                 if (qp_ctx->rate_limit_params)
3190                         return -EPERM;
3191         }
3192
3193         switch (qp_type) {
3194         case MLX4_QP_ST_RC:
3195         case MLX4_QP_ST_XRC:
3196         case MLX4_QP_ST_UC:
3197                 switch (transition) {
3198                 case QP_TRANS_INIT2RTR:
3199                 case QP_TRANS_RTR2RTS:
3200                 case QP_TRANS_RTS2RTS:
3201                 case QP_TRANS_SQD2SQD:
3202                 case QP_TRANS_SQD2RTS:
3203                         if (slave != mlx4_master_func_num(dev)) {
3204                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3205                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3206                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3207                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3208                                         else
3209                                                 num_gids = 1;
3210                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
3211                                                 return -EINVAL;
3212                                 }
3213                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3214                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3215                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3216                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3217                                         else
3218                                                 num_gids = 1;
3219                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
3220                                                 return -EINVAL;
3221                                 }
3222                         }
3223                         break;
3224                 default:
3225                         break;
3226                 }
3227                 break;
3228
3229         case MLX4_QP_ST_MLX:
3230                 qpn = vhcr->in_modifier & 0x7fffff;
3231                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3232                 if (transition == QP_TRANS_INIT2RTR &&
3233                     slave != mlx4_master_func_num(dev) &&
3234                     mlx4_is_qp_reserved(dev, qpn) &&
3235                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3236                         /* only enabled VFs may create MLX proxy QPs */
3237                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3238                                  __func__, slave, port);
3239                         return -EPERM;
3240                 }
3241                 break;
3242
3243         default:
3244                 break;
3245         }
3246
3247         return 0;
3248 }
3249
3250 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3251                            struct mlx4_vhcr *vhcr,
3252                            struct mlx4_cmd_mailbox *inbox,
3253                            struct mlx4_cmd_mailbox *outbox,
3254                            struct mlx4_cmd_info *cmd)
3255 {
3256         struct mlx4_mtt mtt;
3257         __be64 *page_list = inbox->buf;
3258         u64 *pg_list = (u64 *)page_list;
3259         int i;
3260         struct res_mtt *rmtt = NULL;
3261         int start = be64_to_cpu(page_list[0]);
3262         int npages = vhcr->in_modifier;
3263         int err;
3264
3265         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3266         if (err)
3267                 return err;
3268
3269         /* Call the SW implementation of write_mtt:
3270          * - Prepare a dummy mtt struct
3271          * - Translate inbox contents to simple addresses in host endianness */
3272         mtt.offset = 0;  /* TBD: offset is not handled correctly, but it is
3273                             unused here, so zero is harmless */
3274         mtt.order = 0;
3275         mtt.page_shift = 0;
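        /* Mailbox layout: qword 0 holds the starting MTT index and the
         * entries begin at qword 2; bit 0 of each entry is the hardware
         * "present" flag, masked off here so that __mlx4_write_mtt() is
         * handed plain host-endian addresses.
         */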
3276         for (i = 0; i < npages; ++i)
3277                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3278
3279         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3280                                ((u64 *)page_list + 2));
3281
3282         if (rmtt)
3283                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3284
3285         return err;
3286 }
3287
3288 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3289                           struct mlx4_vhcr *vhcr,
3290                           struct mlx4_cmd_mailbox *inbox,
3291                           struct mlx4_cmd_mailbox *outbox,
3292                           struct mlx4_cmd_info *cmd)
3293 {
3294         int eqn = vhcr->in_modifier;
3295         int res_id = eqn | (slave << 10);
3296         struct res_eq *eq;
3297         int err;
3298
3299         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3300         if (err)
3301                 return err;
3302
3303         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3304         if (err)
3305                 goto ex_abort;
3306
3307         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3308         if (err)
3309                 goto ex_put;
3310
3311         atomic_dec(&eq->mtt->ref_count);
3312         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3313         res_end_move(dev, slave, RES_EQ, res_id);
3314         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3315
3316         return 0;
3317
3318 ex_put:
3319         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3320 ex_abort:
3321         res_abort_move(dev, slave, RES_EQ, res_id);
3322
3323         return err;
3324 }
3325
3326 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3327 {
3328         struct mlx4_priv *priv = mlx4_priv(dev);
3329         struct mlx4_slave_event_eq_info *event_eq;
3330         struct mlx4_cmd_mailbox *mailbox;
3331         u32 in_modifier = 0;
3332         int err;
3333         int res_id;
3334         struct res_eq *req;
3335
3336         if (!priv->mfunc.master.slave_state)
3337                 return -EINVAL;
3338
3339         /* make sure the slave id is valid, is not the PF, and is active */
3340         if (slave < 0 || slave > dev->persist->num_vfs ||
3341             slave == dev->caps.function ||
3342             !priv->mfunc.master.slave_state[slave].active)
3343                 return 0;
3344
3345         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3346
3347         /* Create the event only if the slave is registered */
3348         if (event_eq->eqn < 0)
3349                 return 0;
3350
3351         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3352         res_id = (slave << 10) | event_eq->eqn;
3353         err = get_res(dev, slave, res_id, RES_EQ, &req);
3354         if (err)
3355                 goto unlock;
3356
3357         if (req->com.from_state != RES_EQ_HW) {
3358                 err = -EINVAL;
3359                 goto put;
3360         }
3361
3362         mailbox = mlx4_alloc_cmd_mailbox(dev);
3363         if (IS_ERR(mailbox)) {
3364                 err = PTR_ERR(mailbox);
3365                 goto put;
3366         }
3367
3368         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3369                 ++event_eq->token;
3370                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3371         }
3372
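        /* An EQE is 32 bytes; only the first 28 are copied since the last
         * dword, which carries the hardware ownership byte, is not part of
         * the event payload.
         */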
3373         memcpy(mailbox->buf, (u8 *) eqe, 28);
3374
3375         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3376
3377         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3378                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3379                        MLX4_CMD_NATIVE);
3380
3381         put_res(dev, slave, res_id, RES_EQ);
3382         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3383         mlx4_free_cmd_mailbox(dev, mailbox);
3384         return err;
3385
3386 put:
3387         put_res(dev, slave, res_id, RES_EQ);
3388
3389 unlock:
3390         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3391         return err;
3392 }
3393
3394 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3395                           struct mlx4_vhcr *vhcr,
3396                           struct mlx4_cmd_mailbox *inbox,
3397                           struct mlx4_cmd_mailbox *outbox,
3398                           struct mlx4_cmd_info *cmd)
3399 {
3400         int eqn = vhcr->in_modifier;
3401         int res_id = eqn | (slave << 10);
3402         struct res_eq *eq;
3403         int err;
3404
3405         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3406         if (err)
3407                 return err;
3408
3409         if (eq->com.from_state != RES_EQ_HW) {
3410                 err = -EINVAL;
3411                 goto ex_put;
3412         }
3413
3414         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3415
3416 ex_put:
3417         put_res(dev, slave, res_id, RES_EQ);
3418         return err;
3419 }
3420
3421 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3422                           struct mlx4_vhcr *vhcr,
3423                           struct mlx4_cmd_mailbox *inbox,
3424                           struct mlx4_cmd_mailbox *outbox,
3425                           struct mlx4_cmd_info *cmd)
3426 {
3427         int err;
3428         int cqn = vhcr->in_modifier;
3429         struct mlx4_cq_context *cqc = inbox->buf;
3430         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3431         struct res_cq *cq = NULL;
3432         struct res_mtt *mtt;
3433
3434         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3435         if (err)
3436                 return err;
3437         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3438         if (err)
3439                 goto out_move;
3440         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3441         if (err)
3442                 goto out_put;
3443         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3444         if (err)
3445                 goto out_put;
3446         atomic_inc(&mtt->ref_count);
3447         cq->mtt = mtt;
3448         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3449         res_end_move(dev, slave, RES_CQ, cqn);
3450         return 0;
3451
3452 out_put:
3453         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3454 out_move:
3455         res_abort_move(dev, slave, RES_CQ, cqn);
3456         return err;
3457 }
3458
3459 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3460                           struct mlx4_vhcr *vhcr,
3461                           struct mlx4_cmd_mailbox *inbox,
3462                           struct mlx4_cmd_mailbox *outbox,
3463                           struct mlx4_cmd_info *cmd)
3464 {
3465         int err;
3466         int cqn = vhcr->in_modifier;
3467         struct res_cq *cq = NULL;
3468
3469         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3470         if (err)
3471                 return err;
3472         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473         if (err)
3474                 goto out_move;
3475         atomic_dec(&cq->mtt->ref_count);
3476         res_end_move(dev, slave, RES_CQ, cqn);
3477         return 0;
3478
3479 out_move:
3480         res_abort_move(dev, slave, RES_CQ, cqn);
3481         return err;
3482 }
3483
3484 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3485                           struct mlx4_vhcr *vhcr,
3486                           struct mlx4_cmd_mailbox *inbox,
3487                           struct mlx4_cmd_mailbox *outbox,
3488                           struct mlx4_cmd_info *cmd)
3489 {
3490         int cqn = vhcr->in_modifier;
3491         struct res_cq *cq;
3492         int err;
3493
3494         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3495         if (err)
3496                 return err;
3497
3498         if (cq->com.from_state != RES_CQ_HW)
3499                 goto ex_put;
3500
3501         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3502 ex_put:
3503         put_res(dev, slave, cqn, RES_CQ);
3504
3505         return err;
3506 }
3507
3508 static int handle_resize(struct mlx4_dev *dev, int slave,
3509                          struct mlx4_vhcr *vhcr,
3510                          struct mlx4_cmd_mailbox *inbox,
3511                          struct mlx4_cmd_mailbox *outbox,
3512                          struct mlx4_cmd_info *cmd,
3513                          struct res_cq *cq)
3514 {
3515         int err;
3516         struct res_mtt *orig_mtt;
3517         struct res_mtt *mtt;
3518         struct mlx4_cq_context *cqc = inbox->buf;
3519         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3520
3521         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3522         if (err)
3523                 return err;
3524
3525         if (orig_mtt != cq->mtt) {
3526                 err = -EINVAL;
3527                 goto ex_put;
3528         }
3529
3530         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3531         if (err)
3532                 goto ex_put;
3533
3534         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3535         if (err)
3536                 goto ex_put1;
3537         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3538         if (err)
3539                 goto ex_put1;
3540         atomic_dec(&orig_mtt->ref_count);
3541         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3542         atomic_inc(&mtt->ref_count);
3543         cq->mtt = mtt;
3544         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3545         return 0;
3546
3547 ex_put1:
3548         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3549 ex_put:
3550         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3551
3552         return err;
3554 }
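/* On a successful resize the CQ is rebased onto the new MTT range: the
 * original MTT's reference is dropped and the new one's is taken, so a
 * later HW2SW_CQ releases the correct entry.
 */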
3555
3556 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3557                            struct mlx4_vhcr *vhcr,
3558                            struct mlx4_cmd_mailbox *inbox,
3559                            struct mlx4_cmd_mailbox *outbox,
3560                            struct mlx4_cmd_info *cmd)
3561 {
3562         int cqn = vhcr->in_modifier;
3563         struct res_cq *cq;
3564         int err;
3565
3566         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3567         if (err)
3568                 return err;
3569
3570         if (cq->com.from_state != RES_CQ_HW)
3571                 goto ex_put;
3572
3573         if (vhcr->op_modifier == 0) {
3574                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3575                 goto ex_put;
3576         }
3577
3578         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3579 ex_put:
3580         put_res(dev, slave, cqn, RES_CQ);
3581
3582         return err;
3583 }
3584
3585 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3586 {
3587         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3588         int log_rq_stride = srqc->logstride & 7;
3589         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3590
3591         if (log_srq_size + log_rq_stride + 4 < page_shift)
3592                 return 1;
3593
3594         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3595 }
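/* As with EQs and CQs above: a WQE stride is 2^(log_rq_stride + 4) bytes,
 * so the SRQ buffer is 2^(log_srq_size + log_rq_stride + 4) bytes; divided
 * by the page size this yields the MTT entry count, clamped to at least 1.
 */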
3596
3597 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3598                            struct mlx4_vhcr *vhcr,
3599                            struct mlx4_cmd_mailbox *inbox,
3600                            struct mlx4_cmd_mailbox *outbox,
3601                            struct mlx4_cmd_info *cmd)
3602 {
3603         int err;
3604         int srqn = vhcr->in_modifier;
3605         struct res_mtt *mtt;
3606         struct res_srq *srq = NULL;
3607         struct mlx4_srq_context *srqc = inbox->buf;
3608         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3609
3610         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3611                 return -EINVAL;
3612
3613         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3614         if (err)
3615                 return err;
3616         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3617         if (err)
3618                 goto ex_abort;
3619         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3620                               mtt);
3621         if (err)
3622                 goto ex_put_mtt;
3623
3624         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3625         if (err)
3626                 goto ex_put_mtt;
3627
3628         atomic_inc(&mtt->ref_count);
3629         srq->mtt = mtt;
3630         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3631         res_end_move(dev, slave, RES_SRQ, srqn);
3632         return 0;
3633
3634 ex_put_mtt:
3635         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3636 ex_abort:
3637         res_abort_move(dev, slave, RES_SRQ, srqn);
3638
3639         return err;
3640 }
3641
3642 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3643                            struct mlx4_vhcr *vhcr,
3644                            struct mlx4_cmd_mailbox *inbox,
3645                            struct mlx4_cmd_mailbox *outbox,
3646                            struct mlx4_cmd_info *cmd)
3647 {
3648         int err;
3649         int srqn = vhcr->in_modifier;
3650         struct res_srq *srq = NULL;
3651
3652         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3653         if (err)
3654                 return err;
3655         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3656         if (err)
3657                 goto ex_abort;
3658         atomic_dec(&srq->mtt->ref_count);
3659         if (srq->cq)
3660                 atomic_dec(&srq->cq->ref_count);
3661         res_end_move(dev, slave, RES_SRQ, srqn);
3662
3663         return 0;
3664
3665 ex_abort:
3666         res_abort_move(dev, slave, RES_SRQ, srqn);
3667
3668         return err;
3669 }
3670
3671 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3672                            struct mlx4_vhcr *vhcr,
3673                            struct mlx4_cmd_mailbox *inbox,
3674                            struct mlx4_cmd_mailbox *outbox,
3675                            struct mlx4_cmd_info *cmd)
3676 {
3677         int err;
3678         int srqn = vhcr->in_modifier;
3679         struct res_srq *srq;
3680
3681         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3682         if (err)
3683                 return err;
3684         if (srq->com.from_state != RES_SRQ_HW) {
3685                 err = -EBUSY;
3686                 goto out;
3687         }
3688         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3689 out:
3690         put_res(dev, slave, srqn, RES_SRQ);
3691         return err;
3692 }
3693
3694 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3695                          struct mlx4_vhcr *vhcr,
3696                          struct mlx4_cmd_mailbox *inbox,
3697                          struct mlx4_cmd_mailbox *outbox,
3698                          struct mlx4_cmd_info *cmd)
3699 {
3700         int err;
3701         int srqn = vhcr->in_modifier;
3702         struct res_srq *srq;
3703
3704         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3705         if (err)
3706                 return err;
3707
3708         if (srq->com.from_state != RES_SRQ_HW) {
3709                 err = -EBUSY;
3710                 goto out;
3711         }
3712
3713         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3714 out:
3715         put_res(dev, slave, srqn, RES_SRQ);
3716         return err;
3717 }
3718
3719 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3720                         struct mlx4_vhcr *vhcr,
3721                         struct mlx4_cmd_mailbox *inbox,
3722                         struct mlx4_cmd_mailbox *outbox,
3723                         struct mlx4_cmd_info *cmd)
3724 {
3725         int err;
3726         int qpn = vhcr->in_modifier & 0x7fffff;
3727         struct res_qp *qp;
3728
3729         err = get_res(dev, slave, qpn, RES_QP, &qp);
3730         if (err)
3731                 return err;
3732         if (qp->com.from_state != RES_QP_HW) {
3733                 err = -EBUSY;
3734                 goto out;
3735         }
3736
3737         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3738 out:
3739         put_res(dev, slave, qpn, RES_QP);
3740         return err;
3741 }
3742
3743 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3744                               struct mlx4_vhcr *vhcr,
3745                               struct mlx4_cmd_mailbox *inbox,
3746                               struct mlx4_cmd_mailbox *outbox,
3747                               struct mlx4_cmd_info *cmd)
3748 {
3749         struct mlx4_qp_context *context = inbox->buf + 8;
3750         adjust_proxy_tun_qkey(dev, vhcr, context);
3751         update_pkey_index(dev, slave, inbox);
3752         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3753 }
3754
3755 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3756                                   struct mlx4_qp_context *qpc,
3757                                   struct mlx4_cmd_mailbox *inbox)
3758 {
3759         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3760         u8 pri_sched_queue;
3761         int port = mlx4_slave_convert_port(
3762                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3763
3764         if (port < 0)
3765                 return -EINVAL;
3766
3767         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3768                           ((port & 1) << 6);
3769
3770         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3771             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3772                 qpc->pri_path.sched_queue = pri_sched_queue;
3773         }
3774
3775         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3776                 port = mlx4_slave_convert_port(
3777                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3778                                 + 1) - 1;
3779                 if (port < 0)
3780                         return -EINVAL;
3781                 qpc->alt_path.sched_queue =
3782                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3783                         (port & 1) << 6;
3784         }
3785         return 0;
3786 }
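/* Bit 6 of sched_queue selects the port (0 -> port 1, 1 -> port 2); the
 * helper above rewrites it so that the port number a VF sees is remapped
 * to the physical port actually assigned to it.
 */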
3787
3788 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3789                                 struct mlx4_qp_context *qpc,
3790                                 struct mlx4_cmd_mailbox *inbox)
3791 {
3792         u64 mac;
3793         int port;
3794         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3795         u8 sched = *(u8 *)(inbox->buf + 64);
3796         u8 smac_ix;
3797
3798         port = (sched >> 6 & 1) + 1;
3799         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3800                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3801                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3802                         return -ENOENT;
3803         }
3804         return 0;
3805 }
3806
3807 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3808                              struct mlx4_vhcr *vhcr,
3809                              struct mlx4_cmd_mailbox *inbox,
3810                              struct mlx4_cmd_mailbox *outbox,
3811                              struct mlx4_cmd_info *cmd)
3812 {
3813         int err;
3814         struct mlx4_qp_context *qpc = inbox->buf + 8;
3815         int qpn = vhcr->in_modifier & 0x7fffff;
3816         struct res_qp *qp;
3817         u8 orig_sched_queue;
3818         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3819         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3820         u8 orig_pri_path_fl = qpc->pri_path.fl;
3821         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3822         u8 orig_feup = qpc->pri_path.feup;
3823
3824         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3825         if (err)
3826                 return err;
3827         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3828         if (err)
3829                 return err;
3830
3831         if (roce_verify_mac(dev, slave, qpc, inbox))
3832                 return -EINVAL;
3833
3834         update_pkey_index(dev, slave, inbox);
3835         update_gid(dev, inbox, (u8)slave);
3836         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3837         orig_sched_queue = qpc->pri_path.sched_queue;
3838
3839         err = get_res(dev, slave, qpn, RES_QP, &qp);
3840         if (err)
3841                 return err;
3842         if (qp->com.from_state != RES_QP_HW) {
3843                 err = -EBUSY;
3844                 goto out;
3845         }
3846
3847         err = update_vport_qp_param(dev, inbox, slave, qpn);
3848         if (err)
3849                 goto out;
3850
3851         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3852 out:
3853         /* If no error, save the sched_queue value passed in by the VF. This
3854          * is essentially the QoS value provided by the VF, and is needed if
3855          * we allow dynamic changes from VST back to VGT.
3856          */
3857         if (!err) {
3858                 qp->sched_queue = orig_sched_queue;
3859                 qp->vlan_control = orig_vlan_control;
3860                 qp->fvl_rx      =  orig_fvl_rx;
3861                 qp->pri_path_fl = orig_pri_path_fl;
3862                 qp->vlan_index  = orig_vlan_index;
3863                 qp->feup        = orig_feup;
3864         }
3865         put_res(dev, slave, qpn, RES_QP);
3866         return err;
3867 }
3868
3869 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3870                             struct mlx4_vhcr *vhcr,
3871                             struct mlx4_cmd_mailbox *inbox,
3872                             struct mlx4_cmd_mailbox *outbox,
3873                             struct mlx4_cmd_info *cmd)
3874 {
3875         int err;
3876         struct mlx4_qp_context *context = inbox->buf + 8;
3877
3878         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3879         if (err)
3880                 return err;
3881         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3882         if (err)
3883                 return err;
3884
3885         update_pkey_index(dev, slave, inbox);
3886         update_gid(dev, inbox, (u8)slave);
3887         adjust_proxy_tun_qkey(dev, vhcr, context);
3888         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3889 }
3890
3891 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3892                             struct mlx4_vhcr *vhcr,
3893                             struct mlx4_cmd_mailbox *inbox,
3894                             struct mlx4_cmd_mailbox *outbox,
3895                             struct mlx4_cmd_info *cmd)
3896 {
3897         int err;
3898         struct mlx4_qp_context *context = inbox->buf + 8;
3899
3900         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3901         if (err)
3902                 return err;
3903         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3904         if (err)
3905                 return err;
3906
3907         update_pkey_index(dev, slave, inbox);
3908         update_gid(dev, inbox, (u8)slave);
3909         adjust_proxy_tun_qkey(dev, vhcr, context);
3910         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3911 }
3912
3914 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3915                               struct mlx4_vhcr *vhcr,
3916                               struct mlx4_cmd_mailbox *inbox,
3917                               struct mlx4_cmd_mailbox *outbox,
3918                               struct mlx4_cmd_info *cmd)
3919 {
3920         struct mlx4_qp_context *context = inbox->buf + 8;
3921         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3922         if (err)
3923                 return err;
3924         adjust_proxy_tun_qkey(dev, vhcr, context);
3925         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3926 }
3927
3928 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3929                             struct mlx4_vhcr *vhcr,
3930                             struct mlx4_cmd_mailbox *inbox,
3931                             struct mlx4_cmd_mailbox *outbox,
3932                             struct mlx4_cmd_info *cmd)
3933 {
3934         int err;
3935         struct mlx4_qp_context *context = inbox->buf + 8;
3936
3937         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3938         if (err)
3939                 return err;
3940         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3941         if (err)
3942                 return err;
3943
3944         adjust_proxy_tun_qkey(dev, vhcr, context);
3945         update_gid(dev, inbox, (u8)slave);
3946         update_pkey_index(dev, slave, inbox);
3947         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3948 }
3949
3950 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3951                             struct mlx4_vhcr *vhcr,
3952                             struct mlx4_cmd_mailbox *inbox,
3953                             struct mlx4_cmd_mailbox *outbox,
3954                             struct mlx4_cmd_info *cmd)
3955 {
3956         int err;
3957         struct mlx4_qp_context *context = inbox->buf + 8;
3958
3959         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3960         if (err)
3961                 return err;
3962         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3963         if (err)
3964                 return err;
3965
3966         adjust_proxy_tun_qkey(dev, vhcr, context);
3967         update_gid(dev, inbox, (u8)slave);
3968         update_pkey_index(dev, slave, inbox);
3969         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3970 }
3971
3972 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3973                          struct mlx4_vhcr *vhcr,
3974                          struct mlx4_cmd_mailbox *inbox,
3975                          struct mlx4_cmd_mailbox *outbox,
3976                          struct mlx4_cmd_info *cmd)
3977 {
3978         int err;
3979         int qpn = vhcr->in_modifier & 0x7fffff;
3980         struct res_qp *qp;
3981
3982         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3983         if (err)
3984                 return err;
3985         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3986         if (err)
3987                 goto ex_abort;
3988
3989         atomic_dec(&qp->mtt->ref_count);
3990         atomic_dec(&qp->rcq->ref_count);
3991         atomic_dec(&qp->scq->ref_count);
3992         if (qp->srq)
3993                 atomic_dec(&qp->srq->ref_count);
3994         res_end_move(dev, slave, RES_QP, qpn);
3995         return 0;
3996
3997 ex_abort:
3998         res_abort_move(dev, slave, RES_QP, qpn);
3999
4000         return err;
4001 }
4002
4003 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4004                                 struct res_qp *rqp, u8 *gid)
4005 {
4006         struct res_gid *res;
4007
4008         list_for_each_entry(res, &rqp->mcg_list, list) {
4009                 if (!memcmp(res->gid, gid, 16))
4010                         return res;
4011         }
4012         return NULL;
4013 }
4014
4015 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4016                        u8 *gid, enum mlx4_protocol prot,
4017                        enum mlx4_steer_type steer, u64 reg_id)
4018 {
4019         struct res_gid *res;
4020         int err;
4021
4022         res = kzalloc(sizeof(*res), GFP_KERNEL);
4023         if (!res)
4024                 return -ENOMEM;
4025
4026         spin_lock_irq(&rqp->mcg_spl);
4027         if (find_gid(dev, slave, rqp, gid)) {
4028                 kfree(res);
4029                 err = -EEXIST;
4030         } else {
4031                 memcpy(res->gid, gid, 16);
4032                 res->prot = prot;
4033                 res->steer = steer;
4034                 res->reg_id = reg_id;
4035                 list_add_tail(&res->list, &rqp->mcg_list);
4036                 err = 0;
4037         }
4038         spin_unlock_irq(&rqp->mcg_spl);
4039
4040         return err;
4041 }
4042
4043 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4044                        u8 *gid, enum mlx4_protocol prot,
4045                        enum mlx4_steer_type steer, u64 *reg_id)
4046 {
4047         struct res_gid *res;
4048         int err;
4049
4050         spin_lock_irq(&rqp->mcg_spl);
4051         res = find_gid(dev, slave, rqp, gid);
4052         if (!res || res->prot != prot || res->steer != steer)
4053                 err = -EINVAL;
4054         else {
4055                 *reg_id = res->reg_id;
4056                 list_del(&res->list);
4057                 kfree(res);
4058                 err = 0;
4059         }
4060         spin_unlock_irq(&rqp->mcg_spl);
4061
4062         return err;
4063 }
4064
4065 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4066                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4067                      enum mlx4_steer_type type, u64 *reg_id)
4068 {
4069         switch (dev->caps.steering_mode) {
4070         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4071                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4072                 if (port < 0)
4073                         return port;
4074                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4075                                                 block_loopback, prot,
4076                                                 reg_id);
4077         }
4078         case MLX4_STEERING_MODE_B0:
4079                 if (prot == MLX4_PROT_ETH) {
4080                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4081                         if (port < 0)
4082                                 return port;
4083                         gid[5] = port;
4084                 }
4085                 return mlx4_qp_attach_common(dev, qp, gid,
4086                                             block_loopback, prot, type);
4087         default:
4088                 return -EINVAL;
4089         }
4090 }
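/* In device-managed steering the attach becomes a flow-steering rule and
 * reg_id identifies it for a later detach; in B0 mode the legacy
 * mlx4_qp_attach_common() path is used, with gid[5] rewritten to the
 * physical port for Ethernet.
 */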
4091
4092 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4093                      u8 gid[16], enum mlx4_protocol prot,
4094                      enum mlx4_steer_type type, u64 reg_id)
4095 {
4096         switch (dev->caps.steering_mode) {
4097         case MLX4_STEERING_MODE_DEVICE_MANAGED:
4098                 return mlx4_flow_detach(dev, reg_id);
4099         case MLX4_STEERING_MODE_B0:
4100                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4101         default:
4102                 return -EINVAL;
4103         }
4104 }
4105
4106 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4107                             u8 *gid, enum mlx4_protocol prot)
4108 {
4109         int real_port;
4110
4111         if (prot != MLX4_PROT_ETH)
4112                 return 0;
4113
4114         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4115             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4116                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4117                 if (real_port < 0)
4118                         return -EINVAL;
4119                 gid[5] = real_port;
4120         }
4121
4122         return 0;
4123 }
4124
4125 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4126                                struct mlx4_vhcr *vhcr,
4127                                struct mlx4_cmd_mailbox *inbox,
4128                                struct mlx4_cmd_mailbox *outbox,
4129                                struct mlx4_cmd_info *cmd)
4130 {
4131         struct mlx4_qp qp; /* dummy for calling attach/detach */
4132         u8 *gid = inbox->buf;
4133         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4134         int err;
4135         int qpn;
4136         struct res_qp *rqp;
4137         u64 reg_id = 0;
4138         int attach = vhcr->op_modifier;
4139         int block_loopback = vhcr->in_modifier >> 31;
4140         u8 steer_type_mask = 2;
4141         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4142
4143         qpn = vhcr->in_modifier & 0xffffff;
4144         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4145         if (err)
4146                 return err;
4147
4148         qp.qpn = qpn;
4149         if (attach) {
4150                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4151                                 type, &reg_id);
4152                 if (err) {
4153                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4154                         goto ex_put;
4155                 }
4156                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4157                 if (err)
4158                         goto ex_detach;
4159         } else {
4160                 err = mlx4_adjust_port(dev, slave, gid, prot);
4161                 if (err)
4162                         goto ex_put;
4163
4164                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4165                 if (err)
4166                         goto ex_put;
4167
4168                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4169                 if (err)
4170                         pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
4171                                qpn, reg_id);
4172         }
4173         put_res(dev, slave, qpn, RES_QP);
4174         return err;
4175
4176 ex_detach:
4177         qp_detach(dev, &qp, gid, prot, type, reg_id);
4178 ex_put:
4179         put_res(dev, slave, qpn, RES_QP);
4180         return err;
4181 }
4182
4183 /*
4184  * MAC validation for Flow Steering rules.
4185  * A VF may attach rules only with a MAC address that is assigned to it.
4186  */
4187 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4188                                    struct list_head *rlist)
4189 {
4190         struct mac_res *res, *tmp;
4191         __be64 be_mac;
4192
4193         /* make sure it isn't a multicast or broadcast MAC */
4194         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4195             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4196                 list_for_each_entry_safe(res, tmp, rlist, list) {
4197                         be_mac = cpu_to_be64(res->mac << 16);
4198                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4199                                 return 0;
4200                 }
4201                 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4202                        eth_header->eth.dst_mac, slave);
4203                 return -EINVAL;
4204         }
4205         return 0;
4206 }
4207
4208 /*
4209  * If the eth header is missing, prepend an eth header with a MAC address
4210  * assigned to the VF.
4211  */
4212 static int add_eth_header(struct mlx4_dev *dev, int slave,
4213                           struct mlx4_cmd_mailbox *inbox,
4214                           struct list_head *rlist, int header_id)
4215 {
4216         struct mac_res *res, *tmp;
4217         u8 port;
4218         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4219         struct mlx4_net_trans_rule_hw_eth *eth_header;
4220         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4221         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4222         __be64 be_mac = 0;
4223         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4224
4225         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4226         port = ctrl->port;
4227         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4228
4229         /* Make room in the inbox for the eth header */
4230         switch (header_id) {
4231         case MLX4_NET_TRANS_RULE_ID_IPV4:
4232                 ip_header =
4233                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4234                 memmove(ip_header, eth_header,
4235                         sizeof(*ip_header) + sizeof(*l4_header));
4236                 break;
4237         case MLX4_NET_TRANS_RULE_ID_TCP:
4238         case MLX4_NET_TRANS_RULE_ID_UDP:
4239                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4240                             (eth_header + 1);
4241                 memmove(l4_header, eth_header, sizeof(*l4_header));
4242                 break;
4243         default:
4244                 return -EINVAL;
4245         }
4246         list_for_each_entry_safe(res, tmp, rlist, list) {
4247                 if (port == res->port) {
4248                         be_mac = cpu_to_be64(res->mac << 16);
4249                         break;
4250                 }
4251         }
4252         if (!be_mac) {
4253                 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4254                        port);
4255                 return -EINVAL;
4256         }
4257
4258         memset(eth_header, 0, sizeof(*eth_header));
4259         eth_header->size = sizeof(*eth_header) >> 2;
4260         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4261         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4262         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4263
4264         return 0;
4265
4266 }
4267
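/* Only these primary-address-path fields may be modified by a slave via
 * UPDATE_QP; any other mask bit in the command is rejected with -EPERM.
 */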
4268 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4269         1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4270         1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4271 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4272                            struct mlx4_vhcr *vhcr,
4273                            struct mlx4_cmd_mailbox *inbox,
4274                            struct mlx4_cmd_mailbox *outbox,
4275                            struct mlx4_cmd_info *cmd_info)
4276 {
4277         int err;
4278         u32 qpn = vhcr->in_modifier & 0xffffff;
4279         struct res_qp *rqp;
4280         u64 mac;
4281         unsigned port;
4282         u64 pri_addr_path_mask;
4283         struct mlx4_update_qp_context *cmd;
4284         int smac_index;
4285
4286         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4287
4288         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4289         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4290             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4291                 return -EPERM;
4292
4293         if ((pri_addr_path_mask &
4294              (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4295                 !(dev->caps.flags2 &
4296                   MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4297                 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4298                           slave);
4299                 return -EOPNOTSUPP;
4300         }
4301
4302         /* Just change the smac for the QP */
4303         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4304         if (err) {
4305                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4306                 return err;
4307         }
4308
4309         port = (rqp->sched_queue >> 6 & 1) + 1;
4310
4311         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4312                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4313                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4314                                                 smac_index, &mac);
4315
4316                 if (err) {
4317                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4318                                  qpn, smac_index);
4319                         goto err_mac;
4320                 }
4321         }
4322
4323         err = mlx4_cmd(dev, inbox->dma,
4324                        vhcr->in_modifier, 0,
4325                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4326                        MLX4_CMD_NATIVE);
4327         if (err) {
4328                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4329                 goto err_mac;
4330         }
4331
4332 err_mac:
4333         put_res(dev, slave, qpn, RES_QP);
4334         return err;
4335 }
4336
4337 static u32 qp_attach_mbox_size(void *mbox)
4338 {
4339         u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4340         struct _rule_hw  *rule_header;
4341
4342         rule_header = (struct _rule_hw *)(mbox + size);
4343
4344         while (rule_header->size) {
4345                 size += rule_header->size * sizeof(u32);
4346                 rule_header += 1;
4347         }
4348         return size;
4349 }
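/* The attach mailbox is a control segment followed by a chain of rule
 * headers; each header advertises its size in 32-bit words and a zero
 * size terminates the chain, which is how the total mailbox size is
 * recovered above (used when saving a copy for port mirroring).
 */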
4350
4351 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4352
4353 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4354                                          struct mlx4_vhcr *vhcr,
4355                                          struct mlx4_cmd_mailbox *inbox,
4356                                          struct mlx4_cmd_mailbox *outbox,
4357                                          struct mlx4_cmd_info *cmd)
4358 {
4360         struct mlx4_priv *priv = mlx4_priv(dev);
4361         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4362         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4363         int err;
4364         int qpn;
4365         struct res_qp *rqp;
4366         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4367         struct _rule_hw  *rule_header;
4368         int header_id;
4369         struct res_fs_rule *rrule;
4370         u32 mbox_size;
4371
4372         if (dev->caps.steering_mode !=
4373             MLX4_STEERING_MODE_DEVICE_MANAGED)
4374                 return -EOPNOTSUPP;
4375
4376         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4377         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4378         if (err <= 0)
4379                 return -EINVAL;
4380         ctrl->port = err;
4381         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4382         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4383         if (err) {
4384                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4385                 return err;
4386         }
4387         rule_header = (struct _rule_hw *)(ctrl + 1);
4388         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4389
4390         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4391                 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4392
4393         switch (header_id) {
4394         case MLX4_NET_TRANS_RULE_ID_ETH:
4395                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4396                         err = -EINVAL;
4397                         goto err_put_qp;
4398                 }
4399                 break;
4400         case MLX4_NET_TRANS_RULE_ID_IB:
4401                 break;
4402         case MLX4_NET_TRANS_RULE_ID_IPV4:
4403         case MLX4_NET_TRANS_RULE_ID_TCP:
4404         case MLX4_NET_TRANS_RULE_ID_UDP:
4405                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4406                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4407                         err = -EINVAL;
4408                         goto err_put_qp;
4409                 }
4410                 vhcr->in_modifier +=
4411                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4412                 break;
4413         default:
4414                 pr_err("Corrupted mailbox\n");
4415                 err = -EINVAL;
4416                 goto err_put_qp;
4417         }
4418
4419         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4420                            vhcr->in_modifier, 0,
4421                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4422                            MLX4_CMD_NATIVE);
4423         if (err)
4424                 goto err_put_qp;
4425
4427         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4428         if (err) {
4429                 mlx4_err(dev, "Fail to add flow steering resources\n");
4430                 goto err_detach;
4431         }
4432
4433         err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4434         if (err)
4435                 goto err_detach;
4436
4437         mbox_size = qp_attach_mbox_size(inbox->buf);
4438         rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4439         if (!rrule->mirr_mbox) {
4440                 err = -ENOMEM;
4441                 goto err_put_rule;
4442         }
4443         rrule->mirr_mbox_size = mbox_size;
4444         rrule->mirr_rule_id = 0;
4445         memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4446
4447         /* point the mirror copy at the other port */
4448         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4449         if (ctrl->port == 1)
4450                 ctrl->port = 2;
4451         else
4452                 ctrl->port = 1;
4453
4454         if (mlx4_is_bonded(dev))
4455                 mlx4_do_mirror_rule(dev, rrule);
4456
4457         atomic_inc(&rqp->ref_count);
4458
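	/*
	 * Success path: execution falls through the labels below with
	 * err == 0, so the rule and QP are only released from their busy
	 * state and the DETACH under err_detach is skipped.
	 */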
4459 err_put_rule:
4460         put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4461 err_detach:
4462         /* detach rule on error */
4463         if (err)
4464                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4465                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4466                          MLX4_CMD_NATIVE);
4467 err_put_qp:
4468         put_res(dev, slave, qpn, RES_QP);
4469         return err;
4470 }
4471
4472 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4473 {
4474         int err;
4475
4476         err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4477         if (err) {
4478                 mlx4_err(dev, "Fail to remove flow steering resources\n");
4479                 return err;
4480         }
4481
4482         mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4483                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4484         return 0;
4485 }
4486
4487 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4488                                          struct mlx4_vhcr *vhcr,
4489                                          struct mlx4_cmd_mailbox *inbox,
4490                                          struct mlx4_cmd_mailbox *outbox,
4491                                          struct mlx4_cmd_info *cmd)
4492 {
4493         int err;
4494         struct res_qp *rqp;
4495         struct res_fs_rule *rrule;
4496         u64 mirr_reg_id;
4497         int qpn;
4498
4499         if (dev->caps.steering_mode !=
4500             MLX4_STEERING_MODE_DEVICE_MANAGED)
4501                 return -EOPNOTSUPP;
4502
4503         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4504         if (err)
4505                 return err;
4506
4507         if (!rrule->mirr_mbox) {
4508                 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4509                 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4510                 return -EINVAL;
4511         }
4512         mirr_reg_id = rrule->mirr_rule_id;
4513         kfree(rrule->mirr_mbox);
4514         qpn = rrule->qpn;
4515
4516         /* Release the rule from busy state before removal */
4517         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4518         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4519         if (err)
4520                 return err;
4521
4522         if (mirr_reg_id && mlx4_is_bonded(dev)) {
4523                 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4524                 if (err) {
4525                         mlx4_err(dev, "Fail to get resource of mirror rule\n");
4526                 } else {
4527                         put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4528                         mlx4_undo_mirror_rule(dev, rrule);
4529                 }
4530         }
4531         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4532         if (err) {
4533                 mlx4_err(dev, "Fail to remove flow steering resources\n");
4534                 goto out;
4535         }
4536
4537         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4538                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4539                        MLX4_CMD_NATIVE);
4540         if (!err)
4541                 atomic_dec(&rqp->ref_count);
4542 out:
4543         put_res(dev, slave, qpn, RES_QP);
4544         return err;
4545 }
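
/*
 * Note: the DETACH wrapper samples qpn and mirr_rule_id while the rule
 * is still held busy; once put_res() drops that state the entry may be
 * removed, so the saved copies are what the remaining steps rely on.
 */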
4546
4547 enum {
4548         BUSY_MAX_RETRIES = 10
4549 };
4550
4551 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4552                                struct mlx4_vhcr *vhcr,
4553                                struct mlx4_cmd_mailbox *inbox,
4554                                struct mlx4_cmd_mailbox *outbox,
4555                                struct mlx4_cmd_info *cmd)
4556 {
4557         int err;
4558         int index = vhcr->in_modifier & 0xffff;
4559
4560         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4561         if (err)
4562                 return err;
4563
4564         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4565         put_res(dev, slave, index, RES_COUNTER);
4566         return err;
4567 }
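
/*
 * Illustrative sketch, not driver code: the busy-bracket idiom used by
 * the command wrappers in this file. The tracker entry is moved to the
 * busy state for the duration of a firmware command, excluding
 * concurrent frees or ownership changes; claim()/release() are
 * hypothetical stand-ins for get_res()/put_res().
 */
static int run_guarded(int id, int (*claim)(int), void (*release)(int),
		       int (*fw_cmd)(int))
{
	int err = claim(id);	/* mark busy, or fail if already removing */

	if (err)
		return err;
	err = fw_cmd(id);	/* the command runs with the entry held */
	release(id);		/* drop the busy state */
	return err;
}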
4568
4569 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4570 {
4571         struct res_gid *rgid;
4572         struct res_gid *tmp;
4573         struct mlx4_qp qp; /* dummy for calling attach/detach */
4574
4575         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4576                 switch (dev->caps.steering_mode) {
4577                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4578                         mlx4_flow_detach(dev, rgid->reg_id);
4579                         break;
4580                 case MLX4_STEERING_MODE_B0:
4581                         qp.qpn = rqp->local_qpn;
4582                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4583                                                      rgid->prot, rgid->steer);
4584                         break;
4585                 }
4586                 list_del(&rgid->list);
4587                 kfree(rgid);
4588         }
4589 }
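
/*
 * Note on detach_qp() above: in device-managed steering a rule is torn
 * down by its registration handle alone, while B0 steering needs the
 * full (gid, protocol, steer type) tuple plus a QP number, which is why
 * a dummy struct mlx4_qp is filled in just for the call.
 */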
4590
4591 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4592                           enum mlx4_resource type, int print)
4593 {
4594         struct mlx4_priv *priv = mlx4_priv(dev);
4595         struct mlx4_resource_tracker *tracker =
4596                 &priv->mfunc.master.res_tracker;
4597         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4598         struct res_common *r;
4599         struct res_common *tmp;
4600         int busy;
4601
4602         busy = 0;
4603         spin_lock_irq(mlx4_tlock(dev));
4604         list_for_each_entry_safe(r, tmp, rlist, list) {
4605                 if (r->owner == slave) {
4606                         if (!r->removing) {
4607                                 if (r->state == RES_ANY_BUSY) {
4608                                         if (print)
4609                                                 mlx4_dbg(dev,
4610                                                          "%s id 0x%llx is busy\n",
4611                                                           resource_str(type),
4612                                                           r->res_id);
4613                                         ++busy;
4614                                 } else {
4615                                         r->from_state = r->state;
4616                                         r->state = RES_ANY_BUSY;
4617                                         r->removing = 1;
4618                                 }
4619                         }
4620                 }
4621         }
4622         spin_unlock_irq(mlx4_tlock(dev));
4623
4624         return busy;
4625 }
4626
4627 static int move_all_busy(struct mlx4_dev *dev, int slave,
4628                          enum mlx4_resource type)
4629 {
4630         unsigned long begin;
4631         int busy;
4632
4633         begin = jiffies;
4634         do {
4635                 busy = _move_all_busy(dev, slave, type, 0);
4636                 if (time_after(jiffies, begin + 5 * HZ))
4637                         break;
4638                 if (busy)
4639                         cond_resched();
4640         } while (busy);
4641
4642         if (busy)
4643                 busy = _move_all_busy(dev, slave, type, 1);
4644
4645         return busy;
4646 }
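
/*
 * Illustrative sketch, not driver code: the bounded-retry idiom that
 * move_all_busy() above implements. A claim pass is retried until it
 * reports nothing busy or a deadline expires, yielding the CPU between
 * attempts; try_claim() is a hypothetical stand-in for _move_all_busy().
 */
static int poll_until_idle(int (*try_claim)(void *), void *arg,
			   unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;
	int busy;

	do {
		busy = try_claim(arg);	/* how many entries are still busy */
		if (!busy || time_after(jiffies, deadline))
			break;
		cond_resched();		/* yield while waiting for holders */
	} while (1);

	return busy;			/* 0 on success, else leftover count */
}
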
4647 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4648 {
4649         struct mlx4_priv *priv = mlx4_priv(dev);
4650         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4651         struct list_head *qp_list =
4652                 &tracker->slave_list[slave].res_list[RES_QP];
4653         struct res_qp *qp;
4654         struct res_qp *tmp;
4655         int state;
4656         u64 in_param;
4657         int qpn;
4658         int err;
4659
4660         err = move_all_busy(dev, slave, RES_QP);
4661         if (err)
4662                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4663                           slave);
4664
4665         spin_lock_irq(mlx4_tlock(dev));
4666         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4667                 spin_unlock_irq(mlx4_tlock(dev));
4668                 if (qp->com.owner == slave) {
4669                         qpn = qp->com.res_id;
4670                         detach_qp(dev, slave, qp);
4671                         state = qp->com.from_state;
4672                         while (state != 0) {
4673                                 switch (state) {
4674                                 case RES_QP_RESERVED:
4675                                         spin_lock_irq(mlx4_tlock(dev));
4676                                         rb_erase(&qp->com.node,
4677                                                  &tracker->res_tree[RES_QP]);
4678                                         list_del(&qp->com.list);
4679                                         spin_unlock_irq(mlx4_tlock(dev));
4680                                         if (!valid_reserved(dev, slave, qpn)) {
4681                                                 __mlx4_qp_release_range(dev, qpn, 1);
4682                                                 mlx4_release_resource(dev, slave,
4683                                                                       RES_QP, 1, 0);
4684                                         }
4685                                         kfree(qp);
4686                                         state = 0;
4687                                         break;
4688                                 case RES_QP_MAPPED:
4689                                         if (!valid_reserved(dev, slave, qpn))
4690                                                 __mlx4_qp_free_icm(dev, qpn);
4691                                         state = RES_QP_RESERVED;
4692                                         break;
4693                                 case RES_QP_HW:
4694                                         in_param = slave;
4695                                         err = mlx4_cmd(dev, in_param,
4696                                                        qp->local_qpn, 2,
4697                                                        MLX4_CMD_2RST_QP,
4698                                                        MLX4_CMD_TIME_CLASS_A,
4699                                                        MLX4_CMD_NATIVE);
4700                                         if (err)
4701                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4702                                                          slave, qp->local_qpn);
4703                                         atomic_dec(&qp->rcq->ref_count);
4704                                         atomic_dec(&qp->scq->ref_count);
4705                                         atomic_dec(&qp->mtt->ref_count);
4706                                         if (qp->srq)
4707                                                 atomic_dec(&qp->srq->ref_count);
4708                                         state = RES_QP_MAPPED;
4709                                         break;
4710                                 default:
4711                                         state = 0;
4712                                 }
4713                         }
4714                 }
4715                 spin_lock_irq(mlx4_tlock(dev));
4716         }
4717         spin_unlock_irq(mlx4_tlock(dev));
4718 }
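
/*
 * Illustrative sketch, not driver code: the backwards unwind used by
 * rem_slave_qps() above and its sibling helpers below. Cleanup starts
 * from the state the resource was caught in (from_state) and undoes one
 * allocation step per iteration until nothing is left. The states and
 * undo_*() stubs here are hypothetical.
 */
enum { ST_NONE, ST_RESERVED, ST_MAPPED, ST_HW };

static void undo_hw(void)      { /* e.g. a 2RST/HW2SW command */ }
static void undo_map(void)     { /* e.g. free the ICM mapping */ }
static void undo_reserve(void) { /* e.g. release the reserved range */ }

static void unwind_from(int state)
{
	while (state != ST_NONE) {
		switch (state) {
		case ST_HW:		/* hardware still owns it */
			undo_hw();
			state = ST_MAPPED;
			break;
		case ST_MAPPED:		/* backing memory mapped */
			undo_map();
			state = ST_RESERVED;
			break;
		case ST_RESERVED:	/* only the number is held */
			undo_reserve();
			state = ST_NONE;
			break;
		default:
			state = ST_NONE;
		}
	}
}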
4719
4720 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4721 {
4722         struct mlx4_priv *priv = mlx4_priv(dev);
4723         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4724         struct list_head *srq_list =
4725                 &tracker->slave_list[slave].res_list[RES_SRQ];
4726         struct res_srq *srq;
4727         struct res_srq *tmp;
4728         int state;
4729         u64 in_param;
4731         int srqn;
4732         int err;
4733
4734         err = move_all_busy(dev, slave, RES_SRQ);
4735         if (err)
4736                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4737                           slave);
4738
4739         spin_lock_irq(mlx4_tlock(dev));
4740         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4741                 spin_unlock_irq(mlx4_tlock(dev));
4742                 if (srq->com.owner == slave) {
4743                         srqn = srq->com.res_id;
4744                         state = srq->com.from_state;
4745                         while (state != 0) {
4746                                 switch (state) {
4747                                 case RES_SRQ_ALLOCATED:
4748                                         __mlx4_srq_free_icm(dev, srqn);
4749                                         spin_lock_irq(mlx4_tlock(dev));
4750                                         rb_erase(&srq->com.node,
4751                                                  &tracker->res_tree[RES_SRQ]);
4752                                         list_del(&srq->com.list);
4753                                         spin_unlock_irq(mlx4_tlock(dev));
4754                                         mlx4_release_resource(dev, slave,
4755                                                               RES_SRQ, 1, 0);
4756                                         kfree(srq);
4757                                         state = 0;
4758                                         break;
4759
4760                                 case RES_SRQ_HW:
4761                                         in_param = slave;
4762                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4763                                                        MLX4_CMD_HW2SW_SRQ,
4764                                                        MLX4_CMD_TIME_CLASS_A,
4765                                                        MLX4_CMD_NATIVE);
4766                                         if (err)
4767                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4768                                                          slave, srqn);
4769
4770                                         atomic_dec(&srq->mtt->ref_count);
4771                                         if (srq->cq)
4772                                                 atomic_dec(&srq->cq->ref_count);
4773                                         state = RES_SRQ_ALLOCATED;
4774                                         break;
4775
4776                                 default:
4777                                         state = 0;
4778                                 }
4779                         }
4780                 }
4781                 spin_lock_irq(mlx4_tlock(dev));
4782         }
4783         spin_unlock_irq(mlx4_tlock(dev));
4784 }
4785
4786 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4787 {
4788         struct mlx4_priv *priv = mlx4_priv(dev);
4789         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4790         struct list_head *cq_list =
4791                 &tracker->slave_list[slave].res_list[RES_CQ];
4792         struct res_cq *cq;
4793         struct res_cq *tmp;
4794         int state;
4795         u64 in_param;
4797         int cqn;
4798         int err;
4799
4800         err = move_all_busy(dev, slave, RES_CQ);
4801         if (err)
4802                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4803                           slave);
4804
4805         spin_lock_irq(mlx4_tlock(dev));
4806         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4807                 spin_unlock_irq(mlx4_tlock(dev));
4808                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4809                         cqn = cq->com.res_id;
4810                         state = cq->com.from_state;
4811                         while (state != 0) {
4812                                 switch (state) {
4813                                 case RES_CQ_ALLOCATED:
4814                                         __mlx4_cq_free_icm(dev, cqn);
4815                                         spin_lock_irq(mlx4_tlock(dev));
4816                                         rb_erase(&cq->com.node,
4817                                                  &tracker->res_tree[RES_CQ]);
4818                                         list_del(&cq->com.list);
4819                                         spin_unlock_irq(mlx4_tlock(dev));
4820                                         mlx4_release_resource(dev, slave,
4821                                                               RES_CQ, 1, 0);
4822                                         kfree(cq);
4823                                         state = 0;
4824                                         break;
4825
4826                                 case RES_CQ_HW:
4827                                         in_param = slave;
4828                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4829                                                        MLX4_CMD_HW2SW_CQ,
4830                                                        MLX4_CMD_TIME_CLASS_A,
4831                                                        MLX4_CMD_NATIVE);
4832                                         if (err)
4833                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4834                                                          slave, cqn);
4835                                         atomic_dec(&cq->mtt->ref_count);
4836                                         state = RES_CQ_ALLOCATED;
4837                                         break;
4838
4839                                 default:
4840                                         state = 0;
4841                                 }
4842                         }
4843                 }
4844                 spin_lock_irq(mlx4_tlock(dev));
4845         }
4846         spin_unlock_irq(mlx4_tlock(dev));
4847 }
4848
4849 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4850 {
4851         struct mlx4_priv *priv = mlx4_priv(dev);
4852         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4853         struct list_head *mpt_list =
4854                 &tracker->slave_list[slave].res_list[RES_MPT];
4855         struct res_mpt *mpt;
4856         struct res_mpt *tmp;
4857         int state;
4858         u64 in_param;
4860         int mptn;
4861         int err;
4862
4863         err = move_all_busy(dev, slave, RES_MPT);
4864         if (err)
4865                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4866                           slave);
4867
4868         spin_lock_irq(mlx4_tlock(dev));
4869         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4870                 spin_unlock_irq(mlx4_tlock(dev));
4871                 if (mpt->com.owner == slave) {
4872                         mptn = mpt->com.res_id;
4873                         state = mpt->com.from_state;
4874                         while (state != 0) {
4875                                 switch (state) {
4876                                 case RES_MPT_RESERVED:
4877                                         __mlx4_mpt_release(dev, mpt->key);
4878                                         spin_lock_irq(mlx4_tlock(dev));
4879                                         rb_erase(&mpt->com.node,
4880                                                  &tracker->res_tree[RES_MPT]);
4881                                         list_del(&mpt->com.list);
4882                                         spin_unlock_irq(mlx4_tlock(dev));
4883                                         mlx4_release_resource(dev, slave,
4884                                                               RES_MPT, 1, 0);
4885                                         kfree(mpt);
4886                                         state = 0;
4887                                         break;
4888
4889                                 case RES_MPT_MAPPED:
4890                                         __mlx4_mpt_free_icm(dev, mpt->key);
4891                                         state = RES_MPT_RESERVED;
4892                                         break;
4893
4894                                 case RES_MPT_HW:
4895                                         in_param = slave;
4896                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4897                                                      MLX4_CMD_HW2SW_MPT,
4898                                                      MLX4_CMD_TIME_CLASS_A,
4899                                                      MLX4_CMD_NATIVE);
4900                                         if (err)
4901                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4902                                                          slave, mptn);
4903                                         if (mpt->mtt)
4904                                                 atomic_dec(&mpt->mtt->ref_count);
4905                                         state = RES_MPT_MAPPED;
4906                                         break;
4907                                 default:
4908                                         state = 0;
4909                                 }
4910                         }
4911                 }
4912                 spin_lock_irq(mlx4_tlock(dev));
4913         }
4914         spin_unlock_irq(mlx4_tlock(dev));
4915 }
4916
4917 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4918 {
4919         struct mlx4_priv *priv = mlx4_priv(dev);
4920         struct mlx4_resource_tracker *tracker =
4921                 &priv->mfunc.master.res_tracker;
4922         struct list_head *mtt_list =
4923                 &tracker->slave_list[slave].res_list[RES_MTT];
4924         struct res_mtt *mtt;
4925         struct res_mtt *tmp;
4926         int state;
4928         int base;
4929         int err;
4930
4931         err = move_all_busy(dev, slave, RES_MTT);
4932         if (err)
4933                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4934                           slave);
4935
4936         spin_lock_irq(mlx4_tlock(dev));
4937         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4938                 spin_unlock_irq(mlx4_tlock(dev));
4939                 if (mtt->com.owner == slave) {
4940                         base = mtt->com.res_id;
4941                         state = mtt->com.from_state;
4942                         while (state != 0) {
4943                                 switch (state) {
4944                                 case RES_MTT_ALLOCATED:
4945                                         __mlx4_free_mtt_range(dev, base,
4946                                                               mtt->order);
4947                                         spin_lock_irq(mlx4_tlock(dev));
4948                                         rb_erase(&mtt->com.node,
4949                                                  &tracker->res_tree[RES_MTT]);
4950                                         list_del(&mtt->com.list);
4951                                         spin_unlock_irq(mlx4_tlock(dev));
4952                                         mlx4_release_resource(dev, slave, RES_MTT,
4953                                                               1 << mtt->order, 0);
4954                                         kfree(mtt);
4955                                         state = 0;
4956                                         break;
4957
4958                                 default:
4959                                         state = 0;
4960                                 }
4961                         }
4962                 }
4963                 spin_lock_irq(mlx4_tlock(dev));
4964         }
4965         spin_unlock_irq(mlx4_tlock(dev));
4966 }
4967
4968 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4969 {
4970         struct mlx4_cmd_mailbox *mailbox;
4971         int err;
4972         struct res_fs_rule *mirr_rule;
4973         u64 reg_id;
4974
4975         if (!fs_rule->mirr_mbox) {
4976                 mlx4_err(dev, "rule mirroring mailbox is null\n");
4977                 return -EINVAL;
4978         }
4979
4980         mailbox = mlx4_alloc_cmd_mailbox(dev);
4981         if (IS_ERR(mailbox))
4982                 return PTR_ERR(mailbox);
4983         memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4984         err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4985                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4986                            MLX4_CMD_NATIVE);
4987         mlx4_free_cmd_mailbox(dev, mailbox);
4988
4989         if (err)
4990                 goto err;
4991
4992         err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4993         if (err)
4994                 goto err_detach;
4995
4996         err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4997         if (err)
4998                 goto err_rem;
4999
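	/* Cross-link the pair: the original rule records the mirror's
	 * reg_id, while the mirror keeps no mailbox copy; a NULL mirr_mbox
	 * is what makes the DETACH wrapper refuse to remove a mirror rule
	 * directly.
	 */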
5000         fs_rule->mirr_rule_id = reg_id;
5001         mirr_rule->mirr_rule_id = 0;
5002         mirr_rule->mirr_mbox_size = 0;
5003         mirr_rule->mirr_mbox = NULL;
5004         put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5005
5006         return 0;
5007 err_rem:
5008         rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5009 err_detach:
5010         mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5011                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5012 err:
5013         return err;
5014 }
5015
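/*
 * mlx4_mirror_fs_rules() below snapshots the matching rules onto a
 * private list before acting on them: mlx4_do_mirror_rule() and
 * mlx4_undo_mirror_rule() add or remove RES_FS_RULE entries in the same
 * tree, so mutating it in the middle of the rb_next() walk would be
 * unsafe.
 */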
5016 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5017 {
5018         struct mlx4_priv *priv = mlx4_priv(dev);
5019         struct mlx4_resource_tracker *tracker =
5020                 &priv->mfunc.master.res_tracker;
5021         struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5022         struct rb_node *p;
5023         struct res_fs_rule *fs_rule;
5024         int err = 0;
5025         LIST_HEAD(mirr_list);
5026
5027         for (p = rb_first(root); p; p = rb_next(p)) {
5028                 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5029                 if ((bond && fs_rule->mirr_mbox_size) ||
5030                     (!bond && !fs_rule->mirr_mbox_size))
5031                         list_add_tail(&fs_rule->mirr_list, &mirr_list);
5032         }
5033
5034         list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5035                 if (bond)
5036                         err += mlx4_do_mirror_rule(dev, fs_rule);
5037                 else
5038                         err += mlx4_undo_mirror_rule(dev, fs_rule);
5039         }
5040         return err;
5041 }
5042
5043 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5044 {
5045         return mlx4_mirror_fs_rules(dev, true);
5046 }
5047
5048 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5049 {
5050         return mlx4_mirror_fs_rules(dev, false);
5051 }
5052
5053 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5054 {
5055         struct mlx4_priv *priv = mlx4_priv(dev);
5056         struct mlx4_resource_tracker *tracker =
5057                 &priv->mfunc.master.res_tracker;
5058         struct list_head *fs_rule_list =
5059                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5060         struct res_fs_rule *fs_rule;
5061         struct res_fs_rule *tmp;
5062         int state;
5063         u64 base;
5064         int err;
5065
5066         err = move_all_busy(dev, slave, RES_FS_RULE);
5067         if (err)
5068                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5069                           slave);
5070
5071         spin_lock_irq(mlx4_tlock(dev));
5072         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5073                 spin_unlock_irq(mlx4_tlock(dev));
5074                 if (fs_rule->com.owner == slave) {
5075                         base = fs_rule->com.res_id;
5076                         state = fs_rule->com.from_state;
5077                         while (state != 0) {
5078                                 switch (state) {
5079                                 case RES_FS_RULE_ALLOCATED:
5080                                         /* detach rule */
5081                                         err = mlx4_cmd(dev, base, 0, 0,
5082                                                        MLX4_QP_FLOW_STEERING_DETACH,
5083                                                        MLX4_CMD_TIME_CLASS_A,
5084                                                        MLX4_CMD_NATIVE);
5085
5086                                         spin_lock_irq(mlx4_tlock(dev));
5087                                         rb_erase(&fs_rule->com.node,
5088                                                  &tracker->res_tree[RES_FS_RULE]);
5089                                         list_del(&fs_rule->com.list);
5090                                         spin_unlock_irq(mlx4_tlock(dev));
5091                                         kfree(fs_rule->mirr_mbox);
5092                                         kfree(fs_rule);
5093                                         state = 0;
5094                                         break;
5095
5096                                 default:
5097                                         state = 0;
5098                                 }
5099                         }
5100                 }
5101                 spin_lock_irq(mlx4_tlock(dev));
5102         }
5103         spin_unlock_irq(mlx4_tlock(dev));
5104 }
5105
5106 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5107 {
5108         struct mlx4_priv *priv = mlx4_priv(dev);
5109         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5110         struct list_head *eq_list =
5111                 &tracker->slave_list[slave].res_list[RES_EQ];
5112         struct res_eq *eq;
5113         struct res_eq *tmp;
5114         int err;
5115         int state;
5117         int eqn;
5118
5119         err = move_all_busy(dev, slave, RES_EQ);
5120         if (err)
5121                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5122                           slave);
5123
5124         spin_lock_irq(mlx4_tlock(dev));
5125         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5126                 spin_unlock_irq(mlx4_tlock(dev));
5127                 if (eq->com.owner == slave) {
5128                         eqn = eq->com.res_id;
5129                         state = eq->com.from_state;
5130                         while (state != 0) {
5131                                 switch (state) {
5132                                 case RES_EQ_RESERVED:
5133                                         spin_lock_irq(mlx4_tlock(dev));
5134                                         rb_erase(&eq->com.node,
5135                                                  &tracker->res_tree[RES_EQ]);
5136                                         list_del(&eq->com.list);
5137                                         spin_unlock_irq(mlx4_tlock(dev));
5138                                         kfree(eq);
5139                                         state = 0;
5140                                         break;
5141
5142                                 case RES_EQ_HW:
5143                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5144                                                        1, MLX4_CMD_HW2SW_EQ,
5145                                                        MLX4_CMD_TIME_CLASS_A,
5146                                                        MLX4_CMD_NATIVE);
5147                                         if (err)
5148                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5149                                                          slave, eqn & 0x3ff);
5150                                         atomic_dec(&eq->mtt->ref_count);
5151                                         state = RES_EQ_RESERVED;
5152                                         break;
5153
5154                                 default:
5155                                         state = 0;
5156                                 }
5157                         }
5158                 }
5159                 spin_lock_irq(mlx4_tlock(dev));
5160         }
5161         spin_unlock_irq(mlx4_tlock(dev));
5162 }
5163
5164 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5165 {
5166         struct mlx4_priv *priv = mlx4_priv(dev);
5167         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5168         struct list_head *counter_list =
5169                 &tracker->slave_list[slave].res_list[RES_COUNTER];
5170         struct res_counter *counter;
5171         struct res_counter *tmp;
5172         int err;
5173         int *counters_arr = NULL;
5174         int i, j;
5175
5176         err = move_all_busy(dev, slave, RES_COUNTER);
5177         if (err)
5178                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5179                           slave);
5180
5181         counters_arr = kmalloc_array(dev->caps.max_counters,
5182                                      sizeof(*counters_arr), GFP_KERNEL);
5183         if (!counters_arr)
5184                 return;
5185
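	/* Collect the counter ids and drop the tracking entries under the
	 * tracker lock, but issue the actual frees only after the lock is
	 * released, keeping the spinlock hold time short.
	 */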
5186         do {
5187                 i = 0;
5188                 j = 0;
5189                 spin_lock_irq(mlx4_tlock(dev));
5190                 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5191                         if (counter->com.owner == slave) {
5192                                 counters_arr[i++] = counter->com.res_id;
5193                                 rb_erase(&counter->com.node,
5194                                          &tracker->res_tree[RES_COUNTER]);
5195                                 list_del(&counter->com.list);
5196                                 kfree(counter);
5197                         }
5198                 }
5199                 spin_unlock_irq(mlx4_tlock(dev));
5200
5201                 while (j < i) {
5202                         __mlx4_counter_free(dev, counters_arr[j++]);
5203                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5204                 }
5205         } while (i);
5206
5207         kfree(counters_arr);
5208 }
5209
5210 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5211 {
5212         struct mlx4_priv *priv = mlx4_priv(dev);
5213         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5214         struct list_head *xrcdn_list =
5215                 &tracker->slave_list[slave].res_list[RES_XRCD];
5216         struct res_xrcdn *xrcd;
5217         struct res_xrcdn *tmp;
5218         int err;
5219         int xrcdn;
5220
5221         err = move_all_busy(dev, slave, RES_XRCD);
5222         if (err)
5223                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5224                           slave);
5225
5226         spin_lock_irq(mlx4_tlock(dev));
5227         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5228                 if (xrcd->com.owner == slave) {
5229                         xrcdn = xrcd->com.res_id;
5230                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5231                         list_del(&xrcd->com.list);
5232                         kfree(xrcd);
5233                         __mlx4_xrcd_free(dev, xrcdn);
5234                 }
5235         }
5236         spin_unlock_irq(mlx4_tlock(dev));
5237 }
5238
5239 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5240 {
5241         struct mlx4_priv *priv = mlx4_priv(dev);
5242         mlx4_reset_roce_gids(dev, slave);
5243         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5244         rem_slave_vlans(dev, slave);
5245         rem_slave_macs(dev, slave);
5246         rem_slave_fs_rule(dev, slave);
5247         rem_slave_qps(dev, slave);
5248         rem_slave_srqs(dev, slave);
5249         rem_slave_cqs(dev, slave);
5250         rem_slave_mrs(dev, slave);
5251         rem_slave_eqs(dev, slave);
5252         rem_slave_mtts(dev, slave);
5253         rem_slave_counters(dev, slave);
5254         rem_slave_xrcdns(dev, slave);
5255         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5256 }
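
/*
 * The teardown order above follows the reference chains: flow rules pin
 * QPs, QPs pin CQs, SRQs and MTTs, and CQs, SRQs, MRs and EQs pin MTTs,
 * so each class is drained before the classes it still references.
 */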
5257
5258 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5259                            struct mlx4_vf_immed_vlan_work *work)
5260 {
5261         ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5262         ctx->qp_context.qos_vport = work->qos_vport;
5263 }
5264
5265 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5266 {
5267         struct mlx4_vf_immed_vlan_work *work =
5268                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5269         struct mlx4_cmd_mailbox *mailbox;
5270         struct mlx4_update_qp_context *upd_context;
5271         struct mlx4_dev *dev = &work->priv->dev;
5272         struct mlx4_resource_tracker *tracker =
5273                 &work->priv->mfunc.master.res_tracker;
5274         struct list_head *qp_list =
5275                 &tracker->slave_list[work->slave].res_list[RES_QP];
5276         struct res_qp *qp;
5277         struct res_qp *tmp;
5278         u64 qp_path_mask_vlan_ctrl =
5279                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5280                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5281                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5282                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5283                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5284                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5285
5286         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5287                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5288                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5289                        (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5290                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5291                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5292                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5293                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5294
5295         int err;
5296         int port, errors = 0;
5297         u8 vlan_control;
5298
5299         if (mlx4_is_slave(dev)) {
5300                 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5301                           work->slave);
5302                 goto out;
5303         }
5304
5305         mailbox = mlx4_alloc_cmd_mailbox(dev);
5306         if (IS_ERR(mailbox))
5307                 goto out;
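	/* Pick the VF VLAN filtering policy:
	 *  - link disabled: block all tagged and untagged traffic
	 *  - no VLAN configured: only tagged frames are blocked
	 *  - 802.1ad or 802.1Q VST: block whatever would bypass the
	 *    forced tag for that protocol
	 */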
5308         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5309                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5310                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5311                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5312                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5313                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5314                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5315         else if (!work->vlan_id)
5316                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5317                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5318         else if (work->vlan_proto == htons(ETH_P_8021AD))
5319                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5320                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5321                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5322                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5323         else  /* vst 802.1Q */
5324                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5325                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5326                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5327
5328         upd_context = mailbox->buf;
5329         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5330
5331         spin_lock_irq(mlx4_tlock(dev));
5332         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5333                 spin_unlock_irq(mlx4_tlock(dev));
5334                 if (qp->com.owner == work->slave) {
5335                         if (qp->com.from_state != RES_QP_HW ||
5336                             !qp->sched_queue ||  /* no INIT2RTR transition yet */
5337                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5338                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5339                                 spin_lock_irq(mlx4_tlock(dev));
5340                                 continue;
5341                         }
5342                         port = (qp->sched_queue >> 6 & 1) + 1;
5343                         if (port != work->port) {
5344                                 spin_lock_irq(mlx4_tlock(dev));
5345                                 continue;
5346                         }
5347                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5348                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5349                         else
5350                                 upd_context->primary_addr_path_mask =
5351                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5352                         if (work->vlan_id == MLX4_VGT) {
5353                                 upd_context->qp_context.param3 = qp->param3;
5354                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5355                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5356                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5357                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5358                                 upd_context->qp_context.pri_path.feup = qp->feup;
5359                                 upd_context->qp_context.pri_path.sched_queue =
5360                                         qp->sched_queue;
5361                         } else {
5362                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5363                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5364                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5365                                 upd_context->qp_context.pri_path.fvl_rx =
5366                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5367                                 upd_context->qp_context.pri_path.fl =
5368                                         qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5369                                 if (work->vlan_proto == htons(ETH_P_8021AD))
5370                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5371                                 else
5372                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5373                                 upd_context->qp_context.pri_path.feup =
5374                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5375                                 upd_context->qp_context.pri_path.sched_queue =
5376                                         qp->sched_queue & 0xC7;
5377                                 upd_context->qp_context.pri_path.sched_queue |=
5378                                         ((work->qos & 0x7) << 3);
5379
5380                                 if (dev->caps.flags2 &
5381                                     MLX4_DEV_CAP_FLAG2_QOS_VPP)
5382                                         update_qos_vpp(upd_context, work);
5383                         }
5384
5385                         err = mlx4_cmd(dev, mailbox->dma,
5386                                        qp->local_qpn & 0xffffff,
5387                                        0, MLX4_CMD_UPDATE_QP,
5388                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5389                         if (err) {
5390                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5391                                           work->slave, port, qp->local_qpn, err);
5392                                 errors++;
5393                         }
5394                 }
5395                 spin_lock_irq(mlx4_tlock(dev));
5396         }
5397         spin_unlock_irq(mlx4_tlock(dev));
5398         mlx4_free_cmd_mailbox(dev, mailbox);
5399
5400         if (errors)
5401                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5402                          errors, work->slave, work->port);
5403
5404         /* unregister previous vlan_id if needed and we had no errors
5405          * while updating the QPs
5406          */
5407         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5408             NO_INDX != work->orig_vlan_ix)
5409                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5410                                        work->orig_vlan_id);
5411 out:
5412         kfree(work);
5413         return;
5414 }