/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0x7fffffffffffffffULL
#define ETH_ALEN		6

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
};

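/*
 * Tracked resources are kept in per-type red-black trees keyed by res_id,
 * so the master can find the owner of any resource id in O(log n).
 * res_tracker_lookup() and res_tracker_insert() below are the standard
 * rb-tree search/insert helpers for struct res_common.
 */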
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	};
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0 ; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

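/*
 * For UD QPs, overwrite the GID index in the QP context with a per-slave
 * value (0x80 | slave), so that each slave function is steered to its own
 * GID table entry.
 */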
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
		 slave, qp_ctx->pri_path.mgid_index);
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

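/*
 * get_res()/put_res() implement a simple busy-lock on a tracked resource:
 * get_res() verifies that the resource exists and is owned by the slave,
 * saves its current state in from_state and marks it RES_ANY_BUSY;
 * put_res() restores the saved state.
 */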
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{

	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

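/*
 * Allocate tracker entries for a contiguous range of resource ids and
 * charge them to the given slave: each entry is inserted into the
 * per-type rb-tree and linked onto the slave's per-type list, with full
 * rollback if an id is already tracked or an insertion fails.
 */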
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= base; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

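/*
 * The *_res_start_move_to() helpers begin a state transition: under the
 * tracker lock they check ownership, validate that the requested
 * transition is legal for the current state, record from_state/to_state
 * and mark the resource busy.  The caller then either commits the move
 * with res_end_move() or rolls it back with res_abort_move().
 */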
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_cq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

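/*
 * Dispatch an ALLOC_RES command issued by a slave: vhcr->in_modifier
 * selects the resource type and vhcr->op_modifier the operation
 * (reserve, map ICM, or both).  Each per-type helper performs the
 * allocation on behalf of the slave and records it in the tracker.
 */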
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;

}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

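/*
 * Compute the number of MTT entries a QP's buffer needs, based on the
 * SQ/RQ sizes and page shift encoded in the QP context (the RQ is
 * ignored for SRQ/RSS/XRC QPs), rounded up to a power of two.
 */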
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

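/*
 * SW2HW_MPT from a slave: move the MPT to hardware ownership, and for
 * non-physical MRs verify that the referenced MTT range belongs to the
 * slave before forwarding the command to firmware.
 */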
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

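/*
 * A slave's WRITE_MTT must target entries inside an MTT range it owns;
 * get_containing_mtt() scans the slave's MTT list for a tracked range
 * that contains [start, start + len) and marks it busy while the write
 * is performed.
 */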
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			   we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

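/*
 * Forward an event (EQE) generated on the master to a slave: look up the
 * event EQ the slave registered for this event type, copy the EQE into a
 * mailbox and post a GEN_EQE firmware command targeting that slave/EQ.
 */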
2214int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2215{
2216 struct mlx4_priv *priv = mlx4_priv(dev);
2217 struct mlx4_slave_event_eq_info *event_eq;
2218 struct mlx4_cmd_mailbox *mailbox;
2219 u32 in_modifier = 0;
2220 int err;
2221 int res_id;
2222 struct res_eq *req;
2223
2224 if (!priv->mfunc.master.slave_state)
2225 return -EINVAL;
2226
803143fb 2227 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
c82e9aa0
EC
2228
2229 /* Create the event only if the slave is registered */
803143fb 2230 if (event_eq->eqn < 0)
c82e9aa0
EC
2231 return 0;
2232
2233 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2234 res_id = (slave << 8) | event_eq->eqn;
2235 err = get_res(dev, slave, res_id, RES_EQ, &req);
2236 if (err)
2237 goto unlock;
2238
2239 if (req->com.from_state != RES_EQ_HW) {
2240 err = -EINVAL;
2241 goto put;
2242 }
2243
2244 mailbox = mlx4_alloc_cmd_mailbox(dev);
2245 if (IS_ERR(mailbox)) {
2246 err = PTR_ERR(mailbox);
2247 goto put;
2248 }
2249
2250 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2251 ++event_eq->token;
2252 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2253 }
2254
2255 memcpy(mailbox->buf, (u8 *) eqe, 28);
2256
2257 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2258
2259 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2260 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2261 MLX4_CMD_NATIVE);
2262
2263 put_res(dev, slave, res_id, RES_EQ);
2264 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2265 mlx4_free_cmd_mailbox(dev, mailbox);
2266 return err;
2267
2268put:
2269 put_res(dev, slave, res_id, RES_EQ);
2270
2271unlock:
2272 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2273 return err;
2274}
2275
2276int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2277 struct mlx4_vhcr *vhcr,
2278 struct mlx4_cmd_mailbox *inbox,
2279 struct mlx4_cmd_mailbox *outbox,
2280 struct mlx4_cmd_info *cmd)
2281{
2282 int eqn = vhcr->in_modifier;
2283 int res_id = eqn | (slave << 8);
2284 struct res_eq *eq;
2285 int err;
2286
2287 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2288 if (err)
2289 return err;
2290
2291 if (eq->com.from_state != RES_EQ_HW) {
2292 err = -EINVAL;
2293 goto ex_put;
2294 }
2295
2296 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2297
2298ex_put:
2299 put_res(dev, slave, res_id, RES_EQ);
2300 return err;
2301}
2302
2303int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2304 struct mlx4_vhcr *vhcr,
2305 struct mlx4_cmd_mailbox *inbox,
2306 struct mlx4_cmd_mailbox *outbox,
2307 struct mlx4_cmd_info *cmd)
2308{
2309 int err;
2310 int cqn = vhcr->in_modifier;
2311 struct mlx4_cq_context *cqc = inbox->buf;
2312	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2313 struct res_cq *cq;
2314 struct res_mtt *mtt;
2315
2316 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2317 if (err)
2318 return err;
2319	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2320 if (err)
2321 goto out_move;
2322 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2323 if (err)
2324 goto out_put;
2325 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2326 if (err)
2327 goto out_put;
2328 atomic_inc(&mtt->ref_count);
2329 cq->mtt = mtt;
2330 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2331 res_end_move(dev, slave, RES_CQ, cqn);
2332 return 0;
2333
2334out_put:
2335 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2336out_move:
2337 res_abort_move(dev, slave, RES_CQ, cqn);
2338 return err;
2339}
2340
2341int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2342 struct mlx4_vhcr *vhcr,
2343 struct mlx4_cmd_mailbox *inbox,
2344 struct mlx4_cmd_mailbox *outbox,
2345 struct mlx4_cmd_info *cmd)
2346{
2347 int err;
2348 int cqn = vhcr->in_modifier;
2349 struct res_cq *cq;
2350
2351 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2352 if (err)
2353 return err;
2354 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2355 if (err)
2356 goto out_move;
2357 atomic_dec(&cq->mtt->ref_count);
2358 res_end_move(dev, slave, RES_CQ, cqn);
2359 return 0;
2360
2361out_move:
2362 res_abort_move(dev, slave, RES_CQ, cqn);
2363 return err;
2364}
2365
2366int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2367 struct mlx4_vhcr *vhcr,
2368 struct mlx4_cmd_mailbox *inbox,
2369 struct mlx4_cmd_mailbox *outbox,
2370 struct mlx4_cmd_info *cmd)
2371{
2372 int cqn = vhcr->in_modifier;
2373 struct res_cq *cq;
2374 int err;
2375
2376 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2377 if (err)
2378 return err;
2379
2380 if (cq->com.from_state != RES_CQ_HW)
2381 goto ex_put;
2382
2383 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2384ex_put:
2385 put_res(dev, slave, cqn, RES_CQ);
2386
2387 return err;
2388}
2389
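/* CQ resize: verify the CQ still points at its original MTT, take a
 * reference on the new MTT described in the mailbox, forward the command
 * to HW and, on success, swap the CQ's MTT reference.
 */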
2390static int handle_resize(struct mlx4_dev *dev, int slave,
2391 struct mlx4_vhcr *vhcr,
2392 struct mlx4_cmd_mailbox *inbox,
2393 struct mlx4_cmd_mailbox *outbox,
2394 struct mlx4_cmd_info *cmd,
2395 struct res_cq *cq)
2396{
2397 int err;
2398 struct res_mtt *orig_mtt;
2399 struct res_mtt *mtt;
2400 struct mlx4_cq_context *cqc = inbox->buf;
2401	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2402
2403 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2404 if (err)
2405 return err;
2406
2407 if (orig_mtt != cq->mtt) {
2408 err = -EINVAL;
2409 goto ex_put;
2410 }
2411
2412	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2413 if (err)
2414 goto ex_put;
2415
2416 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2417 if (err)
2418 goto ex_put1;
2419 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2420 if (err)
2421 goto ex_put1;
2422 atomic_dec(&orig_mtt->ref_count);
2423 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2424 atomic_inc(&mtt->ref_count);
2425 cq->mtt = mtt;
2426 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2427 return 0;
2428
2429ex_put1:
2430 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2431ex_put:
2432 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2433
2434 return err;
2435
2436}
2437
2438int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2439 struct mlx4_vhcr *vhcr,
2440 struct mlx4_cmd_mailbox *inbox,
2441 struct mlx4_cmd_mailbox *outbox,
2442 struct mlx4_cmd_info *cmd)
2443{
2444 int cqn = vhcr->in_modifier;
2445 struct res_cq *cq;
2446 int err;
2447
2448 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2449 if (err)
2450 return err;
2451
2452 if (cq->com.from_state != RES_CQ_HW)
2453 goto ex_put;
2454
2455 if (vhcr->op_modifier == 0) {
2456 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2457		goto ex_put;
2458 }
2459
2460 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2461ex_put:
2462 put_res(dev, slave, cqn, RES_CQ);
2463
2464 return err;
2465}
2466
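/* Number of MTT entries backing an SRQ buffer: the buffer size is
 * 2^(log_srq_size + log_rq_stride + 4) bytes, divided by the page size
 * 2^page_shift, with a minimum of one entry.
 */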
2467static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2468{
2469 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2470 int log_rq_stride = srqc->logstride & 7;
2471 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2472
2473 if (log_srq_size + log_rq_stride + 4 < page_shift)
2474 return 1;
2475
2476 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2477}
2478
2479int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2480 struct mlx4_vhcr *vhcr,
2481 struct mlx4_cmd_mailbox *inbox,
2482 struct mlx4_cmd_mailbox *outbox,
2483 struct mlx4_cmd_info *cmd)
2484{
2485 int err;
2486 int srqn = vhcr->in_modifier;
2487 struct res_mtt *mtt;
2488 struct res_srq *srq;
2489 struct mlx4_srq_context *srqc = inbox->buf;
2490	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2491
2492 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2493 return -EINVAL;
2494
2495 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2496 if (err)
2497 return err;
2498	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2499 if (err)
2500 goto ex_abort;
2501 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2502 mtt);
2503 if (err)
2504 goto ex_put_mtt;
2505
2506 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2507 if (err)
2508 goto ex_put_mtt;
2509
2510 atomic_inc(&mtt->ref_count);
2511 srq->mtt = mtt;
2512 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2513 res_end_move(dev, slave, RES_SRQ, srqn);
2514 return 0;
2515
2516ex_put_mtt:
2517 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2518ex_abort:
2519 res_abort_move(dev, slave, RES_SRQ, srqn);
2520
2521 return err;
2522}
2523
2524int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2525 struct mlx4_vhcr *vhcr,
2526 struct mlx4_cmd_mailbox *inbox,
2527 struct mlx4_cmd_mailbox *outbox,
2528 struct mlx4_cmd_info *cmd)
2529{
2530 int err;
2531 int srqn = vhcr->in_modifier;
2532 struct res_srq *srq;
2533
2534 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2535 if (err)
2536 return err;
2537 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2538 if (err)
2539 goto ex_abort;
2540 atomic_dec(&srq->mtt->ref_count);
2541 if (srq->cq)
2542 atomic_dec(&srq->cq->ref_count);
2543 res_end_move(dev, slave, RES_SRQ, srqn);
2544
2545 return 0;
2546
2547ex_abort:
2548 res_abort_move(dev, slave, RES_SRQ, srqn);
2549
2550 return err;
2551}
2552
2553int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2554 struct mlx4_vhcr *vhcr,
2555 struct mlx4_cmd_mailbox *inbox,
2556 struct mlx4_cmd_mailbox *outbox,
2557 struct mlx4_cmd_info *cmd)
2558{
2559 int err;
2560 int srqn = vhcr->in_modifier;
2561 struct res_srq *srq;
2562
2563 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2564 if (err)
2565 return err;
2566 if (srq->com.from_state != RES_SRQ_HW) {
2567 err = -EBUSY;
2568 goto out;
2569 }
2570 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2571out:
2572 put_res(dev, slave, srqn, RES_SRQ);
2573 return err;
2574}
2575
2576int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2577 struct mlx4_vhcr *vhcr,
2578 struct mlx4_cmd_mailbox *inbox,
2579 struct mlx4_cmd_mailbox *outbox,
2580 struct mlx4_cmd_info *cmd)
2581{
2582 int err;
2583 int srqn = vhcr->in_modifier;
2584 struct res_srq *srq;
2585
2586 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2587 if (err)
2588 return err;
2589
2590 if (srq->com.from_state != RES_SRQ_HW) {
2591 err = -EBUSY;
2592 goto out;
2593 }
2594
2595 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2596out:
2597 put_res(dev, slave, srqn, RES_SRQ);
2598 return err;
2599}
2600
2601int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2602 struct mlx4_vhcr *vhcr,
2603 struct mlx4_cmd_mailbox *inbox,
2604 struct mlx4_cmd_mailbox *outbox,
2605 struct mlx4_cmd_info *cmd)
2606{
2607 int err;
2608 int qpn = vhcr->in_modifier & 0x7fffff;
2609 struct res_qp *qp;
2610
2611 err = get_res(dev, slave, qpn, RES_QP, &qp);
2612 if (err)
2613 return err;
2614 if (qp->com.from_state != RES_QP_HW) {
2615 err = -EBUSY;
2616 goto out;
2617 }
2618
2619 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2620out:
2621 put_res(dev, slave, qpn, RES_QP);
2622 return err;
2623}
2624
2625int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2626 struct mlx4_vhcr *vhcr,
2627 struct mlx4_cmd_mailbox *inbox,
2628 struct mlx4_cmd_mailbox *outbox,
2629 struct mlx4_cmd_info *cmd)
2630{
2631 struct mlx4_qp_context *qpc = inbox->buf + 8;
2632
2633 update_ud_gid(dev, qpc, (u8)slave);
2634
2635 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2636}
2637
2638int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2639 struct mlx4_vhcr *vhcr,
2640 struct mlx4_cmd_mailbox *inbox,
2641 struct mlx4_cmd_mailbox *outbox,
2642 struct mlx4_cmd_info *cmd)
2643{
2644 int err;
2645 int qpn = vhcr->in_modifier & 0x7fffff;
2646 struct res_qp *qp;
2647
2648 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2649 if (err)
2650 return err;
2651 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2652 if (err)
2653 goto ex_abort;
2654
2655 atomic_dec(&qp->mtt->ref_count);
2656 atomic_dec(&qp->rcq->ref_count);
2657 atomic_dec(&qp->scq->ref_count);
2658 if (qp->srq)
2659 atomic_dec(&qp->srq->ref_count);
2660 res_end_move(dev, slave, RES_QP, qpn);
2661 return 0;
2662
2663ex_abort:
2664 res_abort_move(dev, slave, RES_QP, qpn);
2665
2666 return err;
2667}
2668
2669static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2670 struct res_qp *rqp, u8 *gid)
2671{
2672 struct res_gid *res;
2673
2674 list_for_each_entry(res, &rqp->mcg_list, list) {
2675 if (!memcmp(res->gid, gid, 16))
2676 return res;
2677 }
2678 return NULL;
2679}
2680
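/* Track a multicast group the QP attached to so it can be detached when
 * the slave's resources are cleaned up. Fails with -EEXIST if the GID is
 * already on the QP's list.
 */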
2681static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2682 u8 *gid, enum mlx4_protocol prot,
2683 enum mlx4_steer_type steer)
2684{
2685 struct res_gid *res;
2686 int err;
2687
2688 res = kzalloc(sizeof *res, GFP_KERNEL);
2689 if (!res)
2690 return -ENOMEM;
2691
2692 spin_lock_irq(&rqp->mcg_spl);
2693 if (find_gid(dev, slave, rqp, gid)) {
2694 kfree(res);
2695 err = -EEXIST;
2696 } else {
2697 memcpy(res->gid, gid, 16);
2698 res->prot = prot;
2699		res->steer = steer;
2700 list_add_tail(&res->list, &rqp->mcg_list);
2701 err = 0;
2702 }
2703 spin_unlock_irq(&rqp->mcg_spl);
2704
2705 return err;
2706}
2707
2708static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2709 u8 *gid, enum mlx4_protocol prot,
2710 enum mlx4_steer_type steer)
2711{
2712 struct res_gid *res;
2713 int err;
2714
2715 spin_lock_irq(&rqp->mcg_spl);
2716 res = find_gid(dev, slave, rqp, gid);
2717	if (!res || res->prot != prot || res->steer != steer)
2718 err = -EINVAL;
2719 else {
2720 list_del(&res->list);
2721 kfree(res);
2722 err = 0;
2723 }
2724 spin_unlock_irq(&rqp->mcg_spl);
2725
2726 return err;
2727}
2728
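/* Attach/detach a QP to a multicast group on behalf of a slave.
 * op_modifier selects attach vs. detach; the GID, protocol and steering
 * type come from the mailbox and in_modifier, and the attachment is
 * mirrored in the QP's mcg_list for later cleanup.
 */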
2729int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2730 struct mlx4_vhcr *vhcr,
2731 struct mlx4_cmd_mailbox *inbox,
2732 struct mlx4_cmd_mailbox *outbox,
2733 struct mlx4_cmd_info *cmd)
2734{
2735 struct mlx4_qp qp; /* dummy for calling attach/detach */
2736 u8 *gid = inbox->buf;
2737 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2738	int err;
2739 int qpn;
2740 struct res_qp *rqp;
2741 int attach = vhcr->op_modifier;
2742 int block_loopback = vhcr->in_modifier >> 31;
2743 u8 steer_type_mask = 2;
2744	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2745
2746 qpn = vhcr->in_modifier & 0xffffff;
2747 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2748 if (err)
2749 return err;
2750
2751 qp.qpn = qpn;
2752 if (attach) {
2753		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2754 if (err)
2755 goto ex_put;
2756
2757 err = mlx4_qp_attach_common(dev, &qp, gid,
2758 block_loopback, prot, type);
2759 if (err)
2760 goto ex_rem;
2761 } else {
2762		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2763 if (err)
2764 goto ex_put;
2765 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2766 }
2767
2768 put_res(dev, slave, qpn, RES_QP);
2769 return 0;
2770
2771ex_rem:
2772 /* ignore error return below, already in error */
2773	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2774ex_put:
2775 put_res(dev, slave, qpn, RES_QP);
2776
2777 return err;
2778}
2779
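/* Device-managed flow steering only: attach the rule in HW first, then
 * record it in the slave's RES_FS_RULE range; if bookkeeping fails the
 * rule is detached again so HW and the tracker stay consistent.
 */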
2780int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2781 struct mlx4_vhcr *vhcr,
2782 struct mlx4_cmd_mailbox *inbox,
2783 struct mlx4_cmd_mailbox *outbox,
2784 struct mlx4_cmd_info *cmd)
2785{
2786 int err;
2787
2788 if (dev->caps.steering_mode !=
2789 MLX4_STEERING_MODE_DEVICE_MANAGED)
2790 return -EOPNOTSUPP;
2791
2792 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
2793 vhcr->in_modifier, 0,
2794 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2795 MLX4_CMD_NATIVE);
2796 if (err)
2797 return err;
2798
2799 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
2800 if (err) {
2801		mlx4_err(dev, "Failed to add flow steering resources\n");
2802 /* detach rule*/
2803 mlx4_cmd(dev, vhcr->out_param, 0, 0,
2804 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
2805 MLX4_CMD_NATIVE);
2806 }
2807 return err;
2808}
2809
2810int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
2811 struct mlx4_vhcr *vhcr,
2812 struct mlx4_cmd_mailbox *inbox,
2813 struct mlx4_cmd_mailbox *outbox,
2814 struct mlx4_cmd_info *cmd)
2815{
2816 int err;
2817
2818 if (dev->caps.steering_mode !=
2819 MLX4_STEERING_MODE_DEVICE_MANAGED)
2820 return -EOPNOTSUPP;
2821
2822 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
2823 if (err) {
2824		mlx4_err(dev, "Failed to remove flow steering resources\n");
2825 return err;
2826 }
2827
2828 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
2829 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
2830 MLX4_CMD_NATIVE);
2831 return err;
2832}
2833
2834enum {
2835 BUSY_MAX_RETRIES = 10
2836};
2837
2838int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2839 struct mlx4_vhcr *vhcr,
2840 struct mlx4_cmd_mailbox *inbox,
2841 struct mlx4_cmd_mailbox *outbox,
2842 struct mlx4_cmd_info *cmd)
2843{
2844 int err;
2845 int index = vhcr->in_modifier & 0xffff;
2846
2847 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2848 if (err)
2849 return err;
2850
2851 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2852 put_res(dev, slave, index, RES_COUNTER);
2853 return err;
2854}
2855
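/* Detach every multicast group still attached to the QP and free the
 * tracking entries; used when tearing down a slave's QPs.
 */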
2856static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2857{
2858 struct res_gid *rgid;
2859 struct res_gid *tmp;
2860 struct mlx4_qp qp; /* dummy for calling attach/detach */
2861
2862 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2863 qp.qpn = rqp->local_qpn;
2864 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2865 rgid->steer);
2866 list_del(&rgid->list);
2867 kfree(rgid);
2868 }
2869}
2870
2871static int _move_all_busy(struct mlx4_dev *dev, int slave,
2872 enum mlx4_resource type, int print)
2873{
2874 struct mlx4_priv *priv = mlx4_priv(dev);
2875 struct mlx4_resource_tracker *tracker =
2876 &priv->mfunc.master.res_tracker;
2877 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2878 struct res_common *r;
2879 struct res_common *tmp;
2880 int busy;
2881
2882 busy = 0;
2883 spin_lock_irq(mlx4_tlock(dev));
2884 list_for_each_entry_safe(r, tmp, rlist, list) {
2885 if (r->owner == slave) {
2886 if (!r->removing) {
2887 if (r->state == RES_ANY_BUSY) {
2888 if (print)
2889 mlx4_dbg(dev,
2890					 "%s id 0x%llx is busy\n",
2891 ResourceType(type),
2892 r->res_id);
2893 ++busy;
2894 } else {
2895 r->from_state = r->state;
2896 r->state = RES_ANY_BUSY;
2897 r->removing = 1;
2898 }
2899 }
2900 }
2901 }
2902 spin_unlock_irq(mlx4_tlock(dev));
2903
2904 return busy;
2905}
2906
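/* Mark all of a slave's resources of the given type as busy/removing,
 * retrying for up to 5 seconds while other users still hold them; a final
 * pass prints the ids that remain busy.
 */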
2907static int move_all_busy(struct mlx4_dev *dev, int slave,
2908 enum mlx4_resource type)
2909{
2910 unsigned long begin;
2911 int busy;
2912
2913 begin = jiffies;
2914 do {
2915 busy = _move_all_busy(dev, slave, type, 0);
2916 if (time_after(jiffies, begin + 5 * HZ))
2917 break;
2918 if (busy)
2919 cond_resched();
2920 } while (busy);
2921
2922 if (busy)
2923 busy = _move_all_busy(dev, slave, type, 1);
2924
2925 return busy;
2926}
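/* Tear down a slave's QPs, walking each QP back through its states:
 * RES_QP_HW -> move to reset via 2RST_QP and drop CQ/SRQ/MTT references,
 * RES_QP_MAPPED -> free the ICM, RES_QP_RESERVED -> remove the entry
 * from the tracker and free it.
 */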
2927static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2928{
2929 struct mlx4_priv *priv = mlx4_priv(dev);
2930 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2931 struct list_head *qp_list =
2932 &tracker->slave_list[slave].res_list[RES_QP];
2933 struct res_qp *qp;
2934 struct res_qp *tmp;
2935 int state;
2936 u64 in_param;
2937 int qpn;
2938 int err;
2939
2940 err = move_all_busy(dev, slave, RES_QP);
2941 if (err)
2942		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2943			  "for slave %d\n", slave);
2944
2945 spin_lock_irq(mlx4_tlock(dev));
2946 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2947 spin_unlock_irq(mlx4_tlock(dev));
2948 if (qp->com.owner == slave) {
2949 qpn = qp->com.res_id;
2950 detach_qp(dev, slave, qp);
2951 state = qp->com.from_state;
2952 while (state != 0) {
2953 switch (state) {
2954 case RES_QP_RESERVED:
2955 spin_lock_irq(mlx4_tlock(dev));
2956 rb_erase(&qp->com.node,
2957 &tracker->res_tree[RES_QP]);
2958 list_del(&qp->com.list);
2959 spin_unlock_irq(mlx4_tlock(dev));
2960 kfree(qp);
2961 state = 0;
2962 break;
2963 case RES_QP_MAPPED:
2964 if (!valid_reserved(dev, slave, qpn))
2965 __mlx4_qp_free_icm(dev, qpn);
2966 state = RES_QP_RESERVED;
2967 break;
2968 case RES_QP_HW:
2969 in_param = slave;
2970 err = mlx4_cmd(dev, in_param,
2971 qp->local_qpn, 2,
2972 MLX4_CMD_2RST_QP,
2973 MLX4_CMD_TIME_CLASS_A,
2974 MLX4_CMD_NATIVE);
2975 if (err)
2976 mlx4_dbg(dev, "rem_slave_qps: failed"
2977 " to move slave %d qpn %d to"
2978 " reset\n", slave,
2979 qp->local_qpn);
2980 atomic_dec(&qp->rcq->ref_count);
2981 atomic_dec(&qp->scq->ref_count);
2982 atomic_dec(&qp->mtt->ref_count);
2983 if (qp->srq)
2984 atomic_dec(&qp->srq->ref_count);
2985 state = RES_QP_MAPPED;
2986 break;
2987 default:
2988 state = 0;
2989 }
2990 }
2991 }
2992 spin_lock_irq(mlx4_tlock(dev));
2993 }
2994 spin_unlock_irq(mlx4_tlock(dev));
2995}
2996
2997static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2998{
2999 struct mlx4_priv *priv = mlx4_priv(dev);
3000 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3001 struct list_head *srq_list =
3002 &tracker->slave_list[slave].res_list[RES_SRQ];
3003 struct res_srq *srq;
3004 struct res_srq *tmp;
3005 int state;
3006 u64 in_param;
3007 LIST_HEAD(tlist);
3008 int srqn;
3009 int err;
3010
3011 err = move_all_busy(dev, slave, RES_SRQ);
3012 if (err)
3013 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3014 "busy for slave %d\n", slave);
3015
3016 spin_lock_irq(mlx4_tlock(dev));
3017 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3018 spin_unlock_irq(mlx4_tlock(dev));
3019 if (srq->com.owner == slave) {
3020 srqn = srq->com.res_id;
3021 state = srq->com.from_state;
3022 while (state != 0) {
3023 switch (state) {
3024 case RES_SRQ_ALLOCATED:
3025 __mlx4_srq_free_icm(dev, srqn);
3026 spin_lock_irq(mlx4_tlock(dev));
3027 rb_erase(&srq->com.node,
3028 &tracker->res_tree[RES_SRQ]);
3029 list_del(&srq->com.list);
3030 spin_unlock_irq(mlx4_tlock(dev));
3031 kfree(srq);
3032 state = 0;
3033 break;
3034
3035 case RES_SRQ_HW:
3036 in_param = slave;
3037 err = mlx4_cmd(dev, in_param, srqn, 1,
3038 MLX4_CMD_HW2SW_SRQ,
3039 MLX4_CMD_TIME_CLASS_A,
3040 MLX4_CMD_NATIVE);
3041 if (err)
3042 mlx4_dbg(dev, "rem_slave_srqs: failed"
3043 " to move slave %d srq %d to"
3044 " SW ownership\n",
3045 slave, srqn);
3046
3047 atomic_dec(&srq->mtt->ref_count);
3048 if (srq->cq)
3049 atomic_dec(&srq->cq->ref_count);
3050 state = RES_SRQ_ALLOCATED;
3051 break;
3052
3053 default:
3054 state = 0;
3055 }
3056 }
3057 }
3058 spin_lock_irq(mlx4_tlock(dev));
3059 }
3060 spin_unlock_irq(mlx4_tlock(dev));
3061}
3062
3063static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3064{
3065 struct mlx4_priv *priv = mlx4_priv(dev);
3066 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3067 struct list_head *cq_list =
3068 &tracker->slave_list[slave].res_list[RES_CQ];
3069 struct res_cq *cq;
3070 struct res_cq *tmp;
3071 int state;
3072 u64 in_param;
3073 LIST_HEAD(tlist);
3074 int cqn;
3075 int err;
3076
3077 err = move_all_busy(dev, slave, RES_CQ);
3078 if (err)
3079 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3080 "busy for slave %d\n", slave);
3081
3082 spin_lock_irq(mlx4_tlock(dev));
3083 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3084 spin_unlock_irq(mlx4_tlock(dev));
3085 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3086 cqn = cq->com.res_id;
3087 state = cq->com.from_state;
3088 while (state != 0) {
3089 switch (state) {
3090 case RES_CQ_ALLOCATED:
3091 __mlx4_cq_free_icm(dev, cqn);
3092 spin_lock_irq(mlx4_tlock(dev));
3093 rb_erase(&cq->com.node,
3094 &tracker->res_tree[RES_CQ]);
3095 list_del(&cq->com.list);
3096 spin_unlock_irq(mlx4_tlock(dev));
3097 kfree(cq);
3098 state = 0;
3099 break;
3100
3101 case RES_CQ_HW:
3102 in_param = slave;
3103 err = mlx4_cmd(dev, in_param, cqn, 1,
3104 MLX4_CMD_HW2SW_CQ,
3105 MLX4_CMD_TIME_CLASS_A,
3106 MLX4_CMD_NATIVE);
3107 if (err)
3108 mlx4_dbg(dev, "rem_slave_cqs: failed"
3109 " to move slave %d cq %d to"
3110 " SW ownership\n",
3111 slave, cqn);
3112 atomic_dec(&cq->mtt->ref_count);
3113 state = RES_CQ_ALLOCATED;
3114 break;
3115
3116 default:
3117 state = 0;
3118 }
3119 }
3120 }
3121 spin_lock_irq(mlx4_tlock(dev));
3122 }
3123 spin_unlock_irq(mlx4_tlock(dev));
3124}
3125
3126static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3127{
3128 struct mlx4_priv *priv = mlx4_priv(dev);
3129 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3130 struct list_head *mpt_list =
3131 &tracker->slave_list[slave].res_list[RES_MPT];
3132 struct res_mpt *mpt;
3133 struct res_mpt *tmp;
3134 int state;
3135 u64 in_param;
3136 LIST_HEAD(tlist);
3137 int mptn;
3138 int err;
3139
3140 err = move_all_busy(dev, slave, RES_MPT);
3141 if (err)
3142 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3143 "busy for slave %d\n", slave);
3144
3145 spin_lock_irq(mlx4_tlock(dev));
3146 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3147 spin_unlock_irq(mlx4_tlock(dev));
3148 if (mpt->com.owner == slave) {
3149 mptn = mpt->com.res_id;
3150 state = mpt->com.from_state;
3151 while (state != 0) {
3152 switch (state) {
3153 case RES_MPT_RESERVED:
3154 __mlx4_mr_release(dev, mpt->key);
3155 spin_lock_irq(mlx4_tlock(dev));
3156 rb_erase(&mpt->com.node,
3157 &tracker->res_tree[RES_MPT]);
3158 list_del(&mpt->com.list);
3159 spin_unlock_irq(mlx4_tlock(dev));
3160 kfree(mpt);
3161 state = 0;
3162 break;
3163
3164 case RES_MPT_MAPPED:
3165 __mlx4_mr_free_icm(dev, mpt->key);
3166 state = RES_MPT_RESERVED;
3167 break;
3168
3169 case RES_MPT_HW:
3170 in_param = slave;
3171 err = mlx4_cmd(dev, in_param, mptn, 0,
3172 MLX4_CMD_HW2SW_MPT,
3173 MLX4_CMD_TIME_CLASS_A,
3174 MLX4_CMD_NATIVE);
3175 if (err)
3176 mlx4_dbg(dev, "rem_slave_mrs: failed"
3177 " to move slave %d mpt %d to"
3178 " SW ownership\n",
3179 slave, mptn);
3180 if (mpt->mtt)
3181 atomic_dec(&mpt->mtt->ref_count);
3182 state = RES_MPT_MAPPED;
3183 break;
3184 default:
3185 state = 0;
3186 }
3187 }
3188 }
3189 spin_lock_irq(mlx4_tlock(dev));
3190 }
3191 spin_unlock_irq(mlx4_tlock(dev));
3192}
3193
3194static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3195{
3196 struct mlx4_priv *priv = mlx4_priv(dev);
3197 struct mlx4_resource_tracker *tracker =
3198 &priv->mfunc.master.res_tracker;
3199 struct list_head *mtt_list =
3200 &tracker->slave_list[slave].res_list[RES_MTT];
3201 struct res_mtt *mtt;
3202 struct res_mtt *tmp;
3203 int state;
3204 LIST_HEAD(tlist);
3205 int base;
3206 int err;
3207
3208 err = move_all_busy(dev, slave, RES_MTT);
3209 if (err)
3210 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3211 "busy for slave %d\n", slave);
3212
3213 spin_lock_irq(mlx4_tlock(dev));
3214 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3215 spin_unlock_irq(mlx4_tlock(dev));
3216 if (mtt->com.owner == slave) {
3217 base = mtt->com.res_id;
3218 state = mtt->com.from_state;
3219 while (state != 0) {
3220 switch (state) {
3221 case RES_MTT_ALLOCATED:
3222 __mlx4_free_mtt_range(dev, base,
3223 mtt->order);
3224 spin_lock_irq(mlx4_tlock(dev));
3225 rb_erase(&mtt->com.node,
3226 &tracker->res_tree[RES_MTT]);
3227 list_del(&mtt->com.list);
3228 spin_unlock_irq(mlx4_tlock(dev));
3229 kfree(mtt);
3230 state = 0;
3231 break;
3232
3233 default:
3234 state = 0;
3235 }
3236 }
3237 }
3238 spin_lock_irq(mlx4_tlock(dev));
3239 }
3240 spin_unlock_irq(mlx4_tlock(dev));
3241}
3242
3243static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3244{
3245 struct mlx4_priv *priv = mlx4_priv(dev);
3246 struct mlx4_resource_tracker *tracker =
3247 &priv->mfunc.master.res_tracker;
3248 struct list_head *fs_rule_list =
3249 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3250 struct res_fs_rule *fs_rule;
3251 struct res_fs_rule *tmp;
3252 int state;
3253 u64 base;
3254 int err;
3255
3256 err = move_all_busy(dev, slave, RES_FS_RULE);
3257 if (err)
3258		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3259 slave);
3260
3261 spin_lock_irq(mlx4_tlock(dev));
3262 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3263 spin_unlock_irq(mlx4_tlock(dev));
3264 if (fs_rule->com.owner == slave) {
3265 base = fs_rule->com.res_id;
3266 state = fs_rule->com.from_state;
3267 while (state != 0) {
3268 switch (state) {
3269 case RES_FS_RULE_ALLOCATED:
3270 /* detach rule */
3271 err = mlx4_cmd(dev, base, 0, 0,
3272 MLX4_QP_FLOW_STEERING_DETACH,
3273 MLX4_CMD_TIME_CLASS_A,
3274 MLX4_CMD_NATIVE);
3275
3276 spin_lock_irq(mlx4_tlock(dev));
3277 rb_erase(&fs_rule->com.node,
3278 &tracker->res_tree[RES_FS_RULE]);
3279 list_del(&fs_rule->com.list);
3280 spin_unlock_irq(mlx4_tlock(dev));
3281 kfree(fs_rule);
3282 state = 0;
3283 break;
3284
3285 default:
3286 state = 0;
3287 }
3288 }
3289 }
3290 spin_lock_irq(mlx4_tlock(dev));
3291 }
3292 spin_unlock_irq(mlx4_tlock(dev));
3293}
3294
3295static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3296{
3297 struct mlx4_priv *priv = mlx4_priv(dev);
3298 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3299 struct list_head *eq_list =
3300 &tracker->slave_list[slave].res_list[RES_EQ];
3301 struct res_eq *eq;
3302 struct res_eq *tmp;
3303 int err;
3304 int state;
3305 LIST_HEAD(tlist);
3306 int eqn;
3307 struct mlx4_cmd_mailbox *mailbox;
3308
3309 err = move_all_busy(dev, slave, RES_EQ);
3310 if (err)
3311 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3312 "busy for slave %d\n", slave);
3313
3314 spin_lock_irq(mlx4_tlock(dev));
3315 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3316 spin_unlock_irq(mlx4_tlock(dev));
3317 if (eq->com.owner == slave) {
3318 eqn = eq->com.res_id;
3319 state = eq->com.from_state;
3320 while (state != 0) {
3321 switch (state) {
3322 case RES_EQ_RESERVED:
3323 spin_lock_irq(mlx4_tlock(dev));
3324 rb_erase(&eq->com.node,
3325 &tracker->res_tree[RES_EQ]);
3326 list_del(&eq->com.list);
3327 spin_unlock_irq(mlx4_tlock(dev));
3328 kfree(eq);
3329 state = 0;
3330 break;
3331
3332 case RES_EQ_HW:
3333 mailbox = mlx4_alloc_cmd_mailbox(dev);
3334 if (IS_ERR(mailbox)) {
3335 cond_resched();
3336 continue;
3337 }
3338 err = mlx4_cmd_box(dev, slave, 0,
3339 eqn & 0xff, 0,
3340 MLX4_CMD_HW2SW_EQ,
3341 MLX4_CMD_TIME_CLASS_A,
3342 MLX4_CMD_NATIVE);
3343 if (err)
3344 mlx4_dbg(dev, "rem_slave_eqs: failed"
3345 " to move slave %d eqs %d to"
3346 " SW ownership\n", slave, eqn);
3347					mlx4_free_cmd_mailbox(dev, mailbox);
3348 atomic_dec(&eq->mtt->ref_count);
3349 state = RES_EQ_RESERVED;
3350 break;
3351
3352 default:
3353 state = 0;
3354 }
3355 }
3356 }
3357 spin_lock_irq(mlx4_tlock(dev));
3358 }
3359 spin_unlock_irq(mlx4_tlock(dev));
3360}
3361
3362static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3363{
3364 struct mlx4_priv *priv = mlx4_priv(dev);
3365 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3366 struct list_head *counter_list =
3367 &tracker->slave_list[slave].res_list[RES_COUNTER];
3368 struct res_counter *counter;
3369 struct res_counter *tmp;
3370 int err;
3371 int index;
3372
3373 err = move_all_busy(dev, slave, RES_COUNTER);
3374 if (err)
3375 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3376 "busy for slave %d\n", slave);
3377
3378 spin_lock_irq(mlx4_tlock(dev));
3379 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3380 if (counter->com.owner == slave) {
3381 index = counter->com.res_id;
3382 rb_erase(&counter->com.node,
3383 &tracker->res_tree[RES_COUNTER]);
3384 list_del(&counter->com.list);
3385 kfree(counter);
3386 __mlx4_counter_free(dev, index);
3387 }
3388 }
3389 spin_unlock_irq(mlx4_tlock(dev));
3390}
3391
3392static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3393{
3394 struct mlx4_priv *priv = mlx4_priv(dev);
3395 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3396 struct list_head *xrcdn_list =
3397 &tracker->slave_list[slave].res_list[RES_XRCD];
3398 struct res_xrcdn *xrcd;
3399 struct res_xrcdn *tmp;
3400 int err;
3401 int xrcdn;
3402
3403 err = move_all_busy(dev, slave, RES_XRCD);
3404 if (err)
3405 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3406 "busy for slave %d\n", slave);
3407
3408 spin_lock_irq(mlx4_tlock(dev));
3409 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3410 if (xrcd->com.owner == slave) {
3411 xrcdn = xrcd->com.res_id;
3412			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3413 list_del(&xrcd->com.list);
3414 kfree(xrcd);
3415 __mlx4_xrcd_free(dev, xrcdn);
3416 }
3417 }
3418 spin_unlock_irq(mlx4_tlock(dev));
3419}
3420
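/* Release everything a slave still owns: MACs, QPs (including their
 * multicast attachments), SRQs, CQs, MRs, EQs, MTTs, counters, XRC
 * domains and flow steering rules, all under the slave's resource-tracker
 * mutex.
 */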
3421void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3422{
3423 struct mlx4_priv *priv = mlx4_priv(dev);
3424
3425 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3426 /*VLAN*/
3427 rem_slave_macs(dev, slave);
3428 rem_slave_qps(dev, slave);
3429 rem_slave_srqs(dev, slave);
3430 rem_slave_cqs(dev, slave);
3431 rem_slave_mrs(dev, slave);
3432 rem_slave_eqs(dev, slave);
3433 rem_slave_mtts(dev, slave);
3434 rem_slave_counters(dev, slave);
3435 rem_slave_xrcdns(dev, slave);
3436	rem_slave_fs_rule(dev, slave);
3437 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3438}