2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
50 #define MLX4_MAC_VALID (1ull << 63)
53 struct list_head list;
59 struct list_head list;
74 struct list_head list;
76 enum mlx4_protocol prot;
77 enum mlx4_steer_type steer;
82 RES_QP_BUSY = RES_ANY_BUSY,
84 /* QP number was allocated */
87 /* ICM memory for QP context was mapped */
90 /* QP is in hw ownership */
95 struct res_common com;
100 struct list_head mcg_list;
108 enum res_mtt_states {
109 RES_MTT_BUSY = RES_ANY_BUSY,
113 static inline const char *mtt_states_str(enum res_mtt_states state)
116 case RES_MTT_BUSY: return "RES_MTT_BUSY";
117 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
118 default: return "Unknown";
123 struct res_common com;
128 enum res_mpt_states {
129 RES_MPT_BUSY = RES_ANY_BUSY,
136 struct res_common com;
142 RES_EQ_BUSY = RES_ANY_BUSY,
148 struct res_common com;
153 RES_CQ_BUSY = RES_ANY_BUSY,
159 struct res_common com;
164 enum res_srq_states {
165 RES_SRQ_BUSY = RES_ANY_BUSY,
171 struct res_common com;
177 enum res_counter_states {
178 RES_COUNTER_BUSY = RES_ANY_BUSY,
179 RES_COUNTER_ALLOCATED,
183 struct res_common com;
187 enum res_xrcdn_states {
188 RES_XRCD_BUSY = RES_ANY_BUSY,
193 struct res_common com;
197 enum res_fs_rule_states {
198 RES_FS_RULE_BUSY = RES_ANY_BUSY,
199 RES_FS_RULE_ALLOCATED,
203 struct res_common com;
207 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
209 struct rb_node *node = root->rb_node;
212 struct res_common *res = container_of(node, struct res_common,
215 if (res_id < res->res_id)
216 node = node->rb_left;
217 else if (res_id > res->res_id)
218 node = node->rb_right;
225 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
227 struct rb_node **new = &(root->rb_node), *parent = NULL;
229 /* Figure out where to put new node */
231 struct res_common *this = container_of(*new, struct res_common,
235 if (res->res_id < this->res_id)
236 new = &((*new)->rb_left);
237 else if (res->res_id > this->res_id)
238 new = &((*new)->rb_right);
243 /* Add new node and rebalance tree. */
244 rb_link_node(&res->node, parent, new);
245 rb_insert_color(&res->node, root);
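/*
 * The tracker keeps one rb-tree per resource type, keyed by res_id.
 * res_tracker_lookup() and res_tracker_insert() are the only tree
 * primitives; callers hold mlx4_tlock(dev) while walking the trees or
 * the per-slave resource lists.
 */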
260 static const char *ResourceType(enum mlx4_resource rt)
263 case RES_QP: return "RES_QP";
264 case RES_CQ: return "RES_CQ";
265 case RES_SRQ: return "RES_SRQ";
266 case RES_MPT: return "RES_MPT";
267 case RES_MTT: return "RES_MTT";
268 case RES_MAC: return "RES_MAC";
269 case RES_EQ: return "RES_EQ";
270 case RES_COUNTER: return "RES_COUNTER";
271 case RES_FS_RULE: return "RES_FS_RULE";
272 case RES_XRCD: return "RES_XRCD";
273 default: return "Unknown resource type !!!";
277 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
279 struct mlx4_priv *priv = mlx4_priv(dev);
283 priv->mfunc.master.res_tracker.slave_list =
284 kzalloc(dev->num_slaves * sizeof(struct slave_list),
286 if (!priv->mfunc.master.res_tracker.slave_list)
289 for (i = 0 ; i < dev->num_slaves; i++) {
290 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
291 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
292 slave_list[i].res_list[t]);
293 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
296 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
298 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
299 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
301 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
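/*
 * mlx4_free_resource_tracker() supports partial teardown: with
 * RES_TR_FREE_SLAVES_ONLY it only reclaims the slaves' resources, with
 * RES_TR_FREE_STRUCTS_ONLY it only frees the bookkeeping arrays, and with
 * RES_TR_FREE_ALL it does both (including the master's own function).
 */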
305 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
306 enum mlx4_res_tracker_free_type type)
308 struct mlx4_priv *priv = mlx4_priv(dev);
311 if (priv->mfunc.master.res_tracker.slave_list) {
312 if (type != RES_TR_FREE_STRUCTS_ONLY)
313 for (i = 0 ; i < dev->num_slaves; i++)
314 if (type == RES_TR_FREE_ALL ||
315 dev->caps.function != i)
316 mlx4_delete_all_resources_for_slave(dev, i);
318 if (type != RES_TR_FREE_SLAVES_ONLY) {
319 kfree(priv->mfunc.master.res_tracker.slave_list);
320 priv->mfunc.master.res_tracker.slave_list = NULL;
325 static void update_pkey_index(struct mlx4_dev *dev, int slave,
326 struct mlx4_cmd_mailbox *inbox)
328 u8 sched = *(u8 *)(inbox->buf + 64);
329 u8 orig_index = *(u8 *)(inbox->buf + 35);
331 struct mlx4_priv *priv = mlx4_priv(dev);
334 port = (sched >> 6 & 1) + 1;
336 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
337 *(u8 *)(inbox->buf + 35) = new_index;
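/*
 * update_gid() enforces GID isolation for slaves: UD QPs are pointed at the
 * slave's proxy GID entry (0x80 | slave), and for RC/UC QPs the primary or
 * alternate path mgid_index is masked to 7 bits whenever the optpar mask
 * says the corresponding address path is being modified.
 */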
340 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
343 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
344 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
345 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
347 if (MLX4_QP_ST_UD == ts)
348 qp_ctx->pri_path.mgid_index = 0x80 | slave;
350 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
351 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
352 qp_ctx->pri_path.mgid_index = slave & 0x7F;
353 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
354 qp_ctx->alt_path.mgid_index = slave & 0x7F;
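/*
 * update_vport_qp_param() applies the PF-administered VST settings to a
 * slave's QP context: it programs vlan_control filtering, forces the vlan
 * index and default QoS into the path, and, when spoof checking is enabled,
 * forces the MAC index registered for the VF as well.
 */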
358 static int update_vport_qp_param(struct mlx4_dev *dev,
359 struct mlx4_cmd_mailbox *inbox,
362 struct mlx4_qp_context *qpc = inbox->buf + 8;
363 struct mlx4_vport_oper_state *vp_oper;
364 struct mlx4_priv *priv;
368 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
369 priv = mlx4_priv(dev);
370 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
372 if (MLX4_VGT != vp_oper->state.default_vlan) {
373 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
374 if (MLX4_QP_ST_RC == qp_type ||
375 (MLX4_QP_ST_UD == qp_type &&
376 !mlx4_is_qp_reserved(dev, qpn)))
379 /* the reserved QPs (special, proxy, tunnel)
380 * do not operate over vlans
382 if (mlx4_is_qp_reserved(dev, qpn))
385 /* force vlan stripping by clearing the vsd bit */
386 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
388 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
389 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
390 qpc->pri_path.vlan_control =
391 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
392 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
393 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
394 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
395 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
396 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
397 } else if (0 != vp_oper->state.default_vlan) {
398 qpc->pri_path.vlan_control =
399 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
400 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
401 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
402 } else { /* priority tagged */
403 qpc->pri_path.vlan_control =
404 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
405 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
408 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
409 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
410 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
411 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
412 qpc->pri_path.sched_queue &= 0xC7;
413 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
415 if (vp_oper->state.spoofchk) {
416 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
417 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
422 static int mpt_mask(struct mlx4_dev *dev)
424 return dev->caps.num_mpts - 1;
427 static void *find_res(struct mlx4_dev *dev, u64 res_id,
428 enum mlx4_resource type)
430 struct mlx4_priv *priv = mlx4_priv(dev);
432 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
436 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
437 enum mlx4_resource type,
440 struct res_common *r;
443 spin_lock_irq(mlx4_tlock(dev));
444 r = find_res(dev, res_id, type);
450 if (r->state == RES_ANY_BUSY) {
455 if (r->owner != slave) {
460 r->from_state = r->state;
461 r->state = RES_ANY_BUSY;
464 *((struct res_common **)res) = r;
467 spin_unlock_irq(mlx4_tlock(dev));
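/*
 * get_res() hands a resource to a command wrapper: ownership is checked
 * against the calling slave, the current state is saved in from_state and
 * the resource is parked in RES_ANY_BUSY so it cannot be moved or freed
 * underneath the wrapper.  put_res() restores the saved state.
 */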
471 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
472 enum mlx4_resource type,
473 u64 res_id, int *slave)
476 struct res_common *r;
482 spin_lock(mlx4_tlock(dev));
484 r = find_res(dev, id, type);
489 spin_unlock(mlx4_tlock(dev));
494 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
495 enum mlx4_resource type)
497 struct res_common *r;
499 spin_lock_irq(mlx4_tlock(dev));
500 r = find_res(dev, res_id, type);
502 r->state = r->from_state;
503 spin_unlock_irq(mlx4_tlock(dev));
506 static struct res_common *alloc_qp_tr(int id)
510 ret = kzalloc(sizeof *ret, GFP_KERNEL);
514 ret->com.res_id = id;
515 ret->com.state = RES_QP_RESERVED;
517 INIT_LIST_HEAD(&ret->mcg_list);
518 spin_lock_init(&ret->mcg_spl);
519 atomic_set(&ret->ref_count, 0);
524 static struct res_common *alloc_mtt_tr(int id, int order)
528 ret = kzalloc(sizeof *ret, GFP_KERNEL);
532 ret->com.res_id = id;
534 ret->com.state = RES_MTT_ALLOCATED;
535 atomic_set(&ret->ref_count, 0);
540 static struct res_common *alloc_mpt_tr(int id, int key)
544 ret = kzalloc(sizeof *ret, GFP_KERNEL);
548 ret->com.res_id = id;
549 ret->com.state = RES_MPT_RESERVED;
555 static struct res_common *alloc_eq_tr(int id)
559 ret = kzalloc(sizeof *ret, GFP_KERNEL);
563 ret->com.res_id = id;
564 ret->com.state = RES_EQ_RESERVED;
569 static struct res_common *alloc_cq_tr(int id)
573 ret = kzalloc(sizeof *ret, GFP_KERNEL);
577 ret->com.res_id = id;
578 ret->com.state = RES_CQ_ALLOCATED;
579 atomic_set(&ret->ref_count, 0);
584 static struct res_common *alloc_srq_tr(int id)
588 ret = kzalloc(sizeof *ret, GFP_KERNEL);
592 ret->com.res_id = id;
593 ret->com.state = RES_SRQ_ALLOCATED;
594 atomic_set(&ret->ref_count, 0);
599 static struct res_common *alloc_counter_tr(int id)
601 struct res_counter *ret;
603 ret = kzalloc(sizeof *ret, GFP_KERNEL);
607 ret->com.res_id = id;
608 ret->com.state = RES_COUNTER_ALLOCATED;
613 static struct res_common *alloc_xrcdn_tr(int id)
615 struct res_xrcdn *ret;
617 ret = kzalloc(sizeof *ret, GFP_KERNEL);
621 ret->com.res_id = id;
622 ret->com.state = RES_XRCD_ALLOCATED;
627 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
629 struct res_fs_rule *ret;
631 ret = kzalloc(sizeof *ret, GFP_KERNEL);
635 ret->com.res_id = id;
636 ret->com.state = RES_FS_RULE_ALLOCATED;
641 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
644 struct res_common *ret;
648 ret = alloc_qp_tr(id);
651 ret = alloc_mpt_tr(id, extra);
654 ret = alloc_mtt_tr(id, extra);
657 ret = alloc_eq_tr(id);
660 ret = alloc_cq_tr(id);
663 ret = alloc_srq_tr(id);
666 printk(KERN_ERR "implementation missing\n");
669 ret = alloc_counter_tr(id);
672 ret = alloc_xrcdn_tr(id);
675 ret = alloc_fs_rule_tr(id, extra);
686 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
687 enum mlx4_resource type, int extra)
691 struct mlx4_priv *priv = mlx4_priv(dev);
692 struct res_common **res_arr;
693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
694 struct rb_root *root = &tracker->res_tree[type];
696 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
700 for (i = 0; i < count; ++i) {
701 res_arr[i] = alloc_tr(base + i, type, slave, extra);
703 for (--i; i >= 0; --i)
711 spin_lock_irq(mlx4_tlock(dev));
712 for (i = 0; i < count; ++i) {
713 if (find_res(dev, base + i, type)) {
717 err = res_tracker_insert(root, res_arr[i]);
720 list_add_tail(&res_arr[i]->list,
721 &tracker->slave_list[slave].res_list[type]);
723 spin_unlock_irq(mlx4_tlock(dev));
729 for (--i; i >= base; --i)
730 rb_erase(&res_arr[i]->node, root);
732 spin_unlock_irq(mlx4_tlock(dev));
734 for (i = 0; i < count; ++i)
742 static int remove_qp_ok(struct res_qp *res)
744 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
745 !list_empty(&res->mcg_list)) {
746 pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
747 res->com.state, atomic_read(&res->ref_count));
749 } else if (res->com.state != RES_QP_RESERVED) {
756 static int remove_mtt_ok(struct res_mtt *res, int order)
758 if (res->com.state == RES_MTT_BUSY ||
759 atomic_read(&res->ref_count)) {
760 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
762 mtt_states_str(res->com.state),
763 atomic_read(&res->ref_count));
765 } else if (res->com.state != RES_MTT_ALLOCATED)
767 else if (res->order != order)
773 static int remove_mpt_ok(struct res_mpt *res)
775 if (res->com.state == RES_MPT_BUSY)
777 else if (res->com.state != RES_MPT_RESERVED)
783 static int remove_eq_ok(struct res_eq *res)
785 if (res->com.state == RES_EQ_BUSY)
787 else if (res->com.state != RES_EQ_RESERVED)
793 static int remove_counter_ok(struct res_counter *res)
795 if (res->com.state == RES_COUNTER_BUSY)
797 else if (res->com.state != RES_COUNTER_ALLOCATED)
803 static int remove_xrcdn_ok(struct res_xrcdn *res)
805 if (res->com.state == RES_XRCD_BUSY)
807 else if (res->com.state != RES_XRCD_ALLOCATED)
813 static int remove_fs_rule_ok(struct res_fs_rule *res)
815 if (res->com.state == RES_FS_RULE_BUSY)
817 else if (res->com.state != RES_FS_RULE_ALLOCATED)
823 static int remove_cq_ok(struct res_cq *res)
825 if (res->com.state == RES_CQ_BUSY)
827 else if (res->com.state != RES_CQ_ALLOCATED)
833 static int remove_srq_ok(struct res_srq *res)
835 if (res->com.state == RES_SRQ_BUSY)
837 else if (res->com.state != RES_SRQ_ALLOCATED)
843 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
847 return remove_qp_ok((struct res_qp *)res);
849 return remove_cq_ok((struct res_cq *)res);
851 return remove_srq_ok((struct res_srq *)res);
853 return remove_mpt_ok((struct res_mpt *)res);
855 return remove_mtt_ok((struct res_mtt *)res, extra);
859 return remove_eq_ok((struct res_eq *)res);
861 return remove_counter_ok((struct res_counter *)res);
863 return remove_xrcdn_ok((struct res_xrcdn *)res);
865 return remove_fs_rule_ok((struct res_fs_rule *)res);
871 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
872 enum mlx4_resource type, int extra)
876 struct mlx4_priv *priv = mlx4_priv(dev);
877 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
878 struct res_common *r;
880 spin_lock_irq(mlx4_tlock(dev));
881 for (i = base; i < base + count; ++i) {
882 r = res_tracker_lookup(&tracker->res_tree[type], i);
887 if (r->owner != slave) {
891 err = remove_ok(r, type, extra);
896 for (i = base; i < base + count; ++i) {
897 r = res_tracker_lookup(&tracker->res_tree[type], i);
898 rb_erase(&r->node, &tracker->res_tree[type]);
905 spin_unlock_irq(mlx4_tlock(dev));
910 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
911 enum res_qp_states state, struct res_qp **qp,
914 struct mlx4_priv *priv = mlx4_priv(dev);
915 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
919 spin_lock_irq(mlx4_tlock(dev));
920 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
923 else if (r->com.owner != slave)
928 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
929 __func__, r->com.res_id);
933 case RES_QP_RESERVED:
934 if (r->com.state == RES_QP_MAPPED && !alloc)
937 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
942 if ((r->com.state == RES_QP_RESERVED && alloc) ||
943 r->com.state == RES_QP_HW)
946 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
954 if (r->com.state != RES_QP_MAPPED)
962 r->com.from_state = r->com.state;
963 r->com.to_state = state;
964 r->com.state = RES_QP_BUSY;
970 spin_unlock_irq(mlx4_tlock(dev));
975 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
976 enum res_mpt_states state, struct res_mpt **mpt)
978 struct mlx4_priv *priv = mlx4_priv(dev);
979 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
983 spin_lock_irq(mlx4_tlock(dev));
984 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
987 else if (r->com.owner != slave)
995 case RES_MPT_RESERVED:
996 if (r->com.state != RES_MPT_MAPPED)
1000 case RES_MPT_MAPPED:
1001 if (r->com.state != RES_MPT_RESERVED &&
1002 r->com.state != RES_MPT_HW)
1007 if (r->com.state != RES_MPT_MAPPED)
1015 r->com.from_state = r->com.state;
1016 r->com.to_state = state;
1017 r->com.state = RES_MPT_BUSY;
1023 spin_unlock_irq(mlx4_tlock(dev));
1028 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1029 enum res_eq_states state, struct res_eq **eq)
1031 struct mlx4_priv *priv = mlx4_priv(dev);
1032 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1036 spin_lock_irq(mlx4_tlock(dev));
1037 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1040 else if (r->com.owner != slave)
1048 case RES_EQ_RESERVED:
1049 if (r->com.state != RES_EQ_HW)
1054 if (r->com.state != RES_EQ_RESERVED)
1063 r->com.from_state = r->com.state;
1064 r->com.to_state = state;
1065 r->com.state = RES_EQ_BUSY;
1071 spin_unlock_irq(mlx4_tlock(dev));
1076 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1077 enum res_cq_states state, struct res_cq **cq)
1079 struct mlx4_priv *priv = mlx4_priv(dev);
1080 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1084 spin_lock_irq(mlx4_tlock(dev));
1085 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1088 else if (r->com.owner != slave)
1096 case RES_CQ_ALLOCATED:
1097 if (r->com.state != RES_CQ_HW)
1099 else if (atomic_read(&r->ref_count))
1106 if (r->com.state != RES_CQ_ALLOCATED)
1117 r->com.from_state = r->com.state;
1118 r->com.to_state = state;
1119 r->com.state = RES_CQ_BUSY;
1125 spin_unlock_irq(mlx4_tlock(dev));
1130 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1131 enum res_cq_states state, struct res_srq **srq)
1133 struct mlx4_priv *priv = mlx4_priv(dev);
1134 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1138 spin_lock_irq(mlx4_tlock(dev));
1139 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1142 else if (r->com.owner != slave)
1150 case RES_SRQ_ALLOCATED:
1151 if (r->com.state != RES_SRQ_HW)
1153 else if (atomic_read(&r->ref_count))
1158 if (r->com.state != RES_SRQ_ALLOCATED)
1167 r->com.from_state = r->com.state;
1168 r->com.to_state = state;
1169 r->com.state = RES_SRQ_BUSY;
1175 spin_unlock_irq(mlx4_tlock(dev));
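/*
 * The *_res_start_move_to() helpers above validate the requested state
 * transition and park the resource in its BUSY state, recording from_state
 * and to_state.  The command wrapper then either commits the transition with
 * res_end_move() or rolls it back with res_abort_move() when the firmware
 * command fails.
 */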
1180 static void res_abort_move(struct mlx4_dev *dev, int slave,
1181 enum mlx4_resource type, int id)
1183 struct mlx4_priv *priv = mlx4_priv(dev);
1184 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1185 struct res_common *r;
1187 spin_lock_irq(mlx4_tlock(dev));
1188 r = res_tracker_lookup(&tracker->res_tree[type], id);
1189 if (r && (r->owner == slave))
1190 r->state = r->from_state;
1191 spin_unlock_irq(mlx4_tlock(dev));
1194 static void res_end_move(struct mlx4_dev *dev, int slave,
1195 enum mlx4_resource type, int id)
1197 struct mlx4_priv *priv = mlx4_priv(dev);
1198 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1199 struct res_common *r;
1201 spin_lock_irq(mlx4_tlock(dev));
1202 r = res_tracker_lookup(&tracker->res_tree[type], id);
1203 if (r && (r->owner == slave))
1204 r->state = r->to_state;
1205 spin_unlock_irq(mlx4_tlock(dev));
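/*
 * fw_reserved() covers QPs owned by the firmware, for which the wrapper must
 * not map or free ICM, while valid_reserved() covers the special proxy and
 * tunnel QPs that the master, or the guest proxying them, may register
 * directly in the tracker.
 */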
1208 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1210 return mlx4_is_qp_reserved(dev, qpn) &&
1211 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1214 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1216 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1219 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1220 u64 in_param, u64 *out_param)
1229 case RES_OP_RESERVE:
1230 count = get_param_l(&in_param);
1231 align = get_param_h(&in_param);
1232 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1236 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1238 __mlx4_qp_release_range(dev, base, count);
1241 set_param_l(out_param, base);
1243 case RES_OP_MAP_ICM:
1244 qpn = get_param_l(&in_param) & 0x7fffff;
1245 if (valid_reserved(dev, slave, qpn)) {
1246 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1251 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1256 if (!fw_reserved(dev, qpn)) {
1257 err = __mlx4_qp_alloc_icm(dev, qpn);
1259 res_abort_move(dev, slave, RES_QP, qpn);
1264 res_end_move(dev, slave, RES_QP, qpn);
1274 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1275 u64 in_param, u64 *out_param)
1281 if (op != RES_OP_RESERVE_AND_MAP)
1284 order = get_param_l(&in_param);
1285 base = __mlx4_alloc_mtt_range(dev, order);
1289 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1291 __mlx4_free_mtt_range(dev, base, order);
1293 set_param_l(out_param, base);
1298 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1299 u64 in_param, u64 *out_param)
1304 struct res_mpt *mpt;
1307 case RES_OP_RESERVE:
1308 index = __mlx4_mpt_reserve(dev);
1311 id = index & mpt_mask(dev);
1313 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1315 __mlx4_mpt_release(dev, index);
1318 set_param_l(out_param, index);
1320 case RES_OP_MAP_ICM:
1321 index = get_param_l(&in_param);
1322 id = index & mpt_mask(dev);
1323 err = mr_res_start_move_to(dev, slave, id,
1324 RES_MPT_MAPPED, &mpt);
1328 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1330 res_abort_move(dev, slave, RES_MPT, id);
1334 res_end_move(dev, slave, RES_MPT, id);
1340 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1341 u64 in_param, u64 *out_param)
1347 case RES_OP_RESERVE_AND_MAP:
1348 err = __mlx4_cq_alloc_icm(dev, &cqn);
1352 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1354 __mlx4_cq_free_icm(dev, cqn);
1358 set_param_l(out_param, cqn);
1368 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1369 u64 in_param, u64 *out_param)
1375 case RES_OP_RESERVE_AND_MAP:
1376 err = __mlx4_srq_alloc_icm(dev, &srqn);
1380 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1382 __mlx4_srq_free_icm(dev, srqn);
1386 set_param_l(out_param, srqn);
1396 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1398 struct mlx4_priv *priv = mlx4_priv(dev);
1399 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1400 struct mac_res *res;
1402 res = kzalloc(sizeof *res, GFP_KERNEL);
1406 res->port = (u8) port;
1407 list_add_tail(&res->list,
1408 &tracker->slave_list[slave].res_list[RES_MAC]);
1412 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1415 struct mlx4_priv *priv = mlx4_priv(dev);
1416 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1417 struct list_head *mac_list =
1418 &tracker->slave_list[slave].res_list[RES_MAC];
1419 struct mac_res *res, *tmp;
1421 list_for_each_entry_safe(res, tmp, mac_list, list) {
1422 if (res->mac == mac && res->port == (u8) port) {
1423 list_del(&res->list);
1430 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1432 struct mlx4_priv *priv = mlx4_priv(dev);
1433 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1434 struct list_head *mac_list =
1435 &tracker->slave_list[slave].res_list[RES_MAC];
1436 struct mac_res *res, *tmp;
1438 list_for_each_entry_safe(res, tmp, mac_list, list) {
1439 list_del(&res->list);
1440 __mlx4_unregister_mac(dev, res->port, res->mac);
1445 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1446 u64 in_param, u64 *out_param)
1452 if (op != RES_OP_RESERVE_AND_MAP)
1455 port = get_param_l(out_param);
1458 err = __mlx4_register_mac(dev, port, mac);
1460 set_param_l(out_param, err);
1465 err = mac_add_to_slave(dev, slave, mac, port);
1467 __mlx4_unregister_mac(dev, port, mac);
1472 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1473 u64 in_param, u64 *out_param)
1478 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1479 u64 in_param, u64 *out_param)
1484 if (op != RES_OP_RESERVE)
1487 err = __mlx4_counter_alloc(dev, &index);
1491 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1493 __mlx4_counter_free(dev, index);
1495 set_param_l(out_param, index);
1500 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1501 u64 in_param, u64 *out_param)
1506 if (op != RES_OP_RESERVE)
1509 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1513 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1515 __mlx4_xrcd_free(dev, xrcdn);
1517 set_param_l(out_param, xrcdn);
1522 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1523 struct mlx4_vhcr *vhcr,
1524 struct mlx4_cmd_mailbox *inbox,
1525 struct mlx4_cmd_mailbox *outbox,
1526 struct mlx4_cmd_info *cmd)
1529 int alop = vhcr->op_modifier;
1531 switch (vhcr->in_modifier) {
1533 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1534 vhcr->in_param, &vhcr->out_param);
1538 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1539 vhcr->in_param, &vhcr->out_param);
1543 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1544 vhcr->in_param, &vhcr->out_param);
1548 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1549 vhcr->in_param, &vhcr->out_param);
1553 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1554 vhcr->in_param, &vhcr->out_param);
1558 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1559 vhcr->in_param, &vhcr->out_param);
1563 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1564 vhcr->in_param, &vhcr->out_param);
1568 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1569 vhcr->in_param, &vhcr->out_param);
1573 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1574 vhcr->in_param, &vhcr->out_param);
1585 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1594 case RES_OP_RESERVE:
1595 base = get_param_l(&in_param) & 0x7fffff;
1596 count = get_param_h(&in_param);
1597 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1600 __mlx4_qp_release_range(dev, base, count);
1602 case RES_OP_MAP_ICM:
1603 qpn = get_param_l(&in_param) & 0x7fffff;
1604 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1609 if (!fw_reserved(dev, qpn))
1610 __mlx4_qp_free_icm(dev, qpn);
1612 res_end_move(dev, slave, RES_QP, qpn);
1614 if (valid_reserved(dev, slave, qpn))
1615 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1624 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1625 u64 in_param, u64 *out_param)
1631 if (op != RES_OP_RESERVE_AND_MAP)
1634 base = get_param_l(&in_param);
1635 order = get_param_h(&in_param);
1636 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1638 __mlx4_free_mtt_range(dev, base, order);
1642 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1648 struct res_mpt *mpt;
1651 case RES_OP_RESERVE:
1652 index = get_param_l(&in_param);
1653 id = index & mpt_mask(dev);
1654 err = get_res(dev, slave, id, RES_MPT, &mpt);
1658 put_res(dev, slave, id, RES_MPT);
1660 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1663 __mlx4_mpt_release(dev, index);
1665 case RES_OP_MAP_ICM:
1666 index = get_param_l(&in_param);
1667 id = index & mpt_mask(dev);
1668 err = mr_res_start_move_to(dev, slave, id,
1669 RES_MPT_RESERVED, &mpt);
1673 __mlx4_mpt_free_icm(dev, mpt->key);
1674 res_end_move(dev, slave, RES_MPT, id);
1684 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1685 u64 in_param, u64 *out_param)
1691 case RES_OP_RESERVE_AND_MAP:
1692 cqn = get_param_l(&in_param);
1693 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1697 __mlx4_cq_free_icm(dev, cqn);
1708 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1709 u64 in_param, u64 *out_param)
1715 case RES_OP_RESERVE_AND_MAP:
1716 srqn = get_param_l(&in_param);
1717 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1721 __mlx4_srq_free_icm(dev, srqn);
1732 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733 u64 in_param, u64 *out_param)
1739 case RES_OP_RESERVE_AND_MAP:
1740 port = get_param_l(out_param);
1741 mac_del_from_slave(dev, slave, in_param, port);
1742 __mlx4_unregister_mac(dev, port, in_param);
1753 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1754 u64 in_param, u64 *out_param)
1759 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1760 u64 in_param, u64 *out_param)
1765 if (op != RES_OP_RESERVE)
1768 index = get_param_l(&in_param);
1769 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1773 __mlx4_counter_free(dev, index);
1778 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1779 u64 in_param, u64 *out_param)
1784 if (op != RES_OP_RESERVE)
1787 xrcdn = get_param_l(&in_param);
1788 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1792 __mlx4_xrcd_free(dev, xrcdn);
1797 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1798 struct mlx4_vhcr *vhcr,
1799 struct mlx4_cmd_mailbox *inbox,
1800 struct mlx4_cmd_mailbox *outbox,
1801 struct mlx4_cmd_info *cmd)
1804 int alop = vhcr->op_modifier;
1806 switch (vhcr->in_modifier) {
1808 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1813 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1814 vhcr->in_param, &vhcr->out_param);
1818 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1823 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1824 vhcr->in_param, &vhcr->out_param);
1828 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1829 vhcr->in_param, &vhcr->out_param);
1833 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1834 vhcr->in_param, &vhcr->out_param);
1838 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1839 vhcr->in_param, &vhcr->out_param);
1843 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1844 vhcr->in_param, &vhcr->out_param);
1848 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1849 vhcr->in_param, &vhcr->out_param);
1857 /* ugly but other choices are uglier */
1858 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1860 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1863 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1865 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1868 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1870 return be32_to_cpu(mpt->mtt_sz);
1873 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1875 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1878 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1880 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1883 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1885 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1888 static int mr_is_region(struct mlx4_mpt_entry *mpt)
1890 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1893 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1895 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1898 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1900 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1903 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1905 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1906 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1907 int log_sq_stride = qpc->sq_size_stride & 7;
1908 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1909 int log_rq_stride = qpc->rq_size_stride & 7;
1910 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1911 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1912 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
1913 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
1918 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1920 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1921 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1922 total_mem = sq_size + rq_size;
1924 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1930 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1931 int size, struct res_mtt *mtt)
1933 int res_start = mtt->com.res_id;
1934 int res_size = (1 << mtt->order);
1936 if (start < res_start || start + size > res_start + res_size)
1941 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1942 struct mlx4_vhcr *vhcr,
1943 struct mlx4_cmd_mailbox *inbox,
1944 struct mlx4_cmd_mailbox *outbox,
1945 struct mlx4_cmd_info *cmd)
1948 int index = vhcr->in_modifier;
1949 struct res_mtt *mtt;
1950 struct res_mpt *mpt;
1951 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1957 id = index & mpt_mask(dev);
1958 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1962 /* Disable memory windows for VFs. */
1963 if (!mr_is_region(inbox->buf)) {
1968 /* Make sure that the PD bits related to the slave id are zeros. */
1969 pd = mr_get_pd(inbox->buf);
1970 pd_slave = (pd >> 17) & 0x7f;
1971 if (pd_slave != 0 && pd_slave != slave) {
1976 if (mr_is_fmr(inbox->buf)) {
1977 /* FMR and Bind Enable are forbidden in slave devices. */
1978 if (mr_is_bind_enabled(inbox->buf)) {
1982 /* FMR and Memory Windows are also forbidden. */
1983 if (!mr_is_region(inbox->buf)) {
1989 phys = mr_phys_mpt(inbox->buf);
1991 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1995 err = check_mtt_range(dev, slave, mtt_base,
1996 mr_get_mtt_size(inbox->buf), mtt);
2003 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2008 atomic_inc(&mtt->ref_count);
2009 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2012 res_end_move(dev, slave, RES_MPT, id);
2017 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2019 res_abort_move(dev, slave, RES_MPT, id);
2024 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2025 struct mlx4_vhcr *vhcr,
2026 struct mlx4_cmd_mailbox *inbox,
2027 struct mlx4_cmd_mailbox *outbox,
2028 struct mlx4_cmd_info *cmd)
2031 int index = vhcr->in_modifier;
2032 struct res_mpt *mpt;
2035 id = index & mpt_mask(dev);
2036 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2040 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2045 atomic_dec(&mpt->mtt->ref_count);
2047 res_end_move(dev, slave, RES_MPT, id);
2051 res_abort_move(dev, slave, RES_MPT, id);
2056 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2057 struct mlx4_vhcr *vhcr,
2058 struct mlx4_cmd_mailbox *inbox,
2059 struct mlx4_cmd_mailbox *outbox,
2060 struct mlx4_cmd_info *cmd)
2063 int index = vhcr->in_modifier;
2064 struct res_mpt *mpt;
2067 id = index & mpt_mask(dev);
2068 err = get_res(dev, slave, id, RES_MPT, &mpt);
2072 if (mpt->com.from_state != RES_MPT_HW) {
2077 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2080 put_res(dev, slave, id, RES_MPT);
2084 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2086 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2089 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2091 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2094 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2096 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2099 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2100 struct mlx4_qp_context *context)
2102 u32 qpn = vhcr->in_modifier & 0xffffff;
2105 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2108 /* adjust qkey in qp context */
2109 context->qkey = cpu_to_be32(qkey);
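/*
 * RST2INIT is where a slave QP enters HW ownership: the wrapper validates
 * the MTT range and takes references on the MTT, the receive and send CQs
 * and, when used, the SRQ named in the context, fixes up the pkey index and
 * the proxy/tunnel qkey, and only then forwards the command to firmware.
 */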
2112 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2113 struct mlx4_vhcr *vhcr,
2114 struct mlx4_cmd_mailbox *inbox,
2115 struct mlx4_cmd_mailbox *outbox,
2116 struct mlx4_cmd_info *cmd)
2119 int qpn = vhcr->in_modifier & 0x7fffff;
2120 struct res_mtt *mtt;
2122 struct mlx4_qp_context *qpc = inbox->buf + 8;
2123 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2124 int mtt_size = qp_get_mtt_size(qpc);
2127 int rcqn = qp_get_rcqn(qpc);
2128 int scqn = qp_get_scqn(qpc);
2129 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2130 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2131 struct res_srq *srq;
2132 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2134 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2137 qp->local_qpn = local_qpn;
2138 qp->sched_queue = 0;
2139 qp->qpc_flags = be32_to_cpu(qpc->flags);
2141 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2145 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2149 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2154 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2161 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2166 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2167 update_pkey_index(dev, slave, inbox);
2168 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2171 atomic_inc(&mtt->ref_count);
2173 atomic_inc(&rcq->ref_count);
2175 atomic_inc(&scq->ref_count);
2179 put_res(dev, slave, scqn, RES_CQ);
2182 atomic_inc(&srq->ref_count);
2183 put_res(dev, slave, srqn, RES_SRQ);
2186 put_res(dev, slave, rcqn, RES_CQ);
2187 put_res(dev, slave, mtt_base, RES_MTT);
2188 res_end_move(dev, slave, RES_QP, qpn);
2194 put_res(dev, slave, srqn, RES_SRQ);
2197 put_res(dev, slave, scqn, RES_CQ);
2199 put_res(dev, slave, rcqn, RES_CQ);
2201 put_res(dev, slave, mtt_base, RES_MTT);
2203 res_abort_move(dev, slave, RES_QP, qpn);
2208 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2210 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2213 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2215 int log_eq_size = eqc->log_eq_size & 0x1f;
2216 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2218 if (log_eq_size + 5 < page_shift)
2221 return 1 << (log_eq_size + 5 - page_shift);
2224 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2226 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2229 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2231 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2232 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2234 if (log_cq_size + 5 < page_shift)
2237 return 1 << (log_cq_size + 5 - page_shift);
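/*
 * eq_get_mtt_size()/cq_get_mtt_size() compute how many MTT entries back the
 * queue: each EQE/CQE here is 32 bytes (hence the "+ 5"), so the count is
 * the queue size in bytes divided by the page size.  Illustrative numbers
 * only: with log_cq_size = 10 and 4K pages (page_shift = 12),
 * 1 << (10 + 5 - 12) = 8 MTT entries are needed.
 */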
2240 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2241 struct mlx4_vhcr *vhcr,
2242 struct mlx4_cmd_mailbox *inbox,
2243 struct mlx4_cmd_mailbox *outbox,
2244 struct mlx4_cmd_info *cmd)
2247 int eqn = vhcr->in_modifier;
2248 int res_id = (slave << 8) | eqn;
2249 struct mlx4_eq_context *eqc = inbox->buf;
2250 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2251 int mtt_size = eq_get_mtt_size(eqc);
2253 struct res_mtt *mtt;
2255 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2258 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2262 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2266 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2270 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2274 atomic_inc(&mtt->ref_count);
2276 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2277 res_end_move(dev, slave, RES_EQ, res_id);
2281 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2283 res_abort_move(dev, slave, RES_EQ, res_id);
2285 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2289 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2290 int len, struct res_mtt **res)
2292 struct mlx4_priv *priv = mlx4_priv(dev);
2293 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2294 struct res_mtt *mtt;
2297 spin_lock_irq(mlx4_tlock(dev));
2298 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2300 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2302 mtt->com.from_state = mtt->com.state;
2303 mtt->com.state = RES_MTT_BUSY;
2308 spin_unlock_irq(mlx4_tlock(dev));
2313 static int verify_qp_parameters(struct mlx4_dev *dev,
2314 struct mlx4_cmd_mailbox *inbox,
2315 enum qp_transition transition, u8 slave)
2318 struct mlx4_qp_context *qp_ctx;
2319 enum mlx4_qp_optpar optpar;
2321 qp_ctx = inbox->buf + 8;
2322 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2323 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2328 switch (transition) {
2329 case QP_TRANS_INIT2RTR:
2330 case QP_TRANS_RTR2RTS:
2331 case QP_TRANS_RTS2RTS:
2332 case QP_TRANS_SQD2SQD:
2333 case QP_TRANS_SQD2RTS:
2334 if (slave != mlx4_master_func_num(dev))
2335 /* slaves have only gid index 0 */
2336 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2337 if (qp_ctx->pri_path.mgid_index)
2339 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2340 if (qp_ctx->alt_path.mgid_index)
2355 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2356 struct mlx4_vhcr *vhcr,
2357 struct mlx4_cmd_mailbox *inbox,
2358 struct mlx4_cmd_mailbox *outbox,
2359 struct mlx4_cmd_info *cmd)
2361 struct mlx4_mtt mtt;
2362 __be64 *page_list = inbox->buf;
2363 u64 *pg_list = (u64 *)page_list;
2365 struct res_mtt *rmtt = NULL;
2366 int start = be64_to_cpu(page_list[0]);
2367 int npages = vhcr->in_modifier;
2370 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2374 /* Call the SW implementation of write_mtt:
2375 * - Prepare a dummy mtt struct
2376 * - Translate inbox contents to simple addresses in host endianness */
2377 mtt.offset = 0; /* TBD: this is broken, but it is not handled here since
2378 the offset is not actually used */
2381 for (i = 0; i < npages; ++i)
2382 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
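/*
 * Mailbox layout as built by the slave: page_list[0] carries the starting
 * MTT offset and the page addresses begin at page_list[2], each with bit 0
 * used as a "present" flag.  The flag is cleared above so that
 * __mlx4_write_mtt() receives plain host addresses.
 */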
2384 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2385 ((u64 *)page_list + 2));
2388 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2393 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2394 struct mlx4_vhcr *vhcr,
2395 struct mlx4_cmd_mailbox *inbox,
2396 struct mlx4_cmd_mailbox *outbox,
2397 struct mlx4_cmd_info *cmd)
2399 int eqn = vhcr->in_modifier;
2400 int res_id = eqn | (slave << 8);
2404 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2408 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2412 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2416 atomic_dec(&eq->mtt->ref_count);
2417 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2418 res_end_move(dev, slave, RES_EQ, res_id);
2419 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2424 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2426 res_abort_move(dev, slave, RES_EQ, res_id);
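/*
 * mlx4_GEN_EQE() lets the PF raise an event on behalf of a slave: the EQE is
 * copied into a mailbox and the GEN_EQE command is issued with the slave id
 * and the slave's registered EQN encoded in the in_modifier.  Nothing is
 * generated if the slave never registered an EQ for this event type.
 */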
2431 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2433 struct mlx4_priv *priv = mlx4_priv(dev);
2434 struct mlx4_slave_event_eq_info *event_eq;
2435 struct mlx4_cmd_mailbox *mailbox;
2436 u32 in_modifier = 0;
2441 if (!priv->mfunc.master.slave_state)
2444 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2446 /* Create the event only if the slave is registered */
2447 if (event_eq->eqn < 0)
2450 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2451 res_id = (slave << 8) | event_eq->eqn;
2452 err = get_res(dev, slave, res_id, RES_EQ, &req);
2456 if (req->com.from_state != RES_EQ_HW) {
2461 mailbox = mlx4_alloc_cmd_mailbox(dev);
2462 if (IS_ERR(mailbox)) {
2463 err = PTR_ERR(mailbox);
2467 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2469 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2472 memcpy(mailbox->buf, (u8 *) eqe, 28);
2474 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2476 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2477 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2480 put_res(dev, slave, res_id, RES_EQ);
2481 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2482 mlx4_free_cmd_mailbox(dev, mailbox);
2486 put_res(dev, slave, res_id, RES_EQ);
2489 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2493 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2494 struct mlx4_vhcr *vhcr,
2495 struct mlx4_cmd_mailbox *inbox,
2496 struct mlx4_cmd_mailbox *outbox,
2497 struct mlx4_cmd_info *cmd)
2499 int eqn = vhcr->in_modifier;
2500 int res_id = eqn | (slave << 8);
2504 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2508 if (eq->com.from_state != RES_EQ_HW) {
2513 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2516 put_res(dev, slave, res_id, RES_EQ);
2520 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2521 struct mlx4_vhcr *vhcr,
2522 struct mlx4_cmd_mailbox *inbox,
2523 struct mlx4_cmd_mailbox *outbox,
2524 struct mlx4_cmd_info *cmd)
2527 int cqn = vhcr->in_modifier;
2528 struct mlx4_cq_context *cqc = inbox->buf;
2529 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2531 struct res_mtt *mtt;
2533 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2536 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2539 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2542 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2545 atomic_inc(&mtt->ref_count);
2547 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2548 res_end_move(dev, slave, RES_CQ, cqn);
2552 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2554 res_abort_move(dev, slave, RES_CQ, cqn);
2558 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2559 struct mlx4_vhcr *vhcr,
2560 struct mlx4_cmd_mailbox *inbox,
2561 struct mlx4_cmd_mailbox *outbox,
2562 struct mlx4_cmd_info *cmd)
2565 int cqn = vhcr->in_modifier;
2568 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2571 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2574 atomic_dec(&cq->mtt->ref_count);
2575 res_end_move(dev, slave, RES_CQ, cqn);
2579 res_abort_move(dev, slave, RES_CQ, cqn);
2583 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2584 struct mlx4_vhcr *vhcr,
2585 struct mlx4_cmd_mailbox *inbox,
2586 struct mlx4_cmd_mailbox *outbox,
2587 struct mlx4_cmd_info *cmd)
2589 int cqn = vhcr->in_modifier;
2593 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2597 if (cq->com.from_state != RES_CQ_HW)
2600 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2602 put_res(dev, slave, cqn, RES_CQ);
2607 static int handle_resize(struct mlx4_dev *dev, int slave,
2608 struct mlx4_vhcr *vhcr,
2609 struct mlx4_cmd_mailbox *inbox,
2610 struct mlx4_cmd_mailbox *outbox,
2611 struct mlx4_cmd_info *cmd,
2615 struct res_mtt *orig_mtt;
2616 struct res_mtt *mtt;
2617 struct mlx4_cq_context *cqc = inbox->buf;
2618 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2620 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2624 if (orig_mtt != cq->mtt) {
2629 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2633 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2636 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2639 atomic_dec(&orig_mtt->ref_count);
2640 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2641 atomic_inc(&mtt->ref_count);
2643 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2647 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2649 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2655 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2656 struct mlx4_vhcr *vhcr,
2657 struct mlx4_cmd_mailbox *inbox,
2658 struct mlx4_cmd_mailbox *outbox,
2659 struct mlx4_cmd_info *cmd)
2661 int cqn = vhcr->in_modifier;
2665 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2669 if (cq->com.from_state != RES_CQ_HW)
2672 if (vhcr->op_modifier == 0) {
2673 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2677 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2679 put_res(dev, slave, cqn, RES_CQ);
2684 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2686 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2687 int log_rq_stride = srqc->logstride & 7;
2688 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2690 if (log_srq_size + log_rq_stride + 4 < page_shift)
2693 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2696 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2697 struct mlx4_vhcr *vhcr,
2698 struct mlx4_cmd_mailbox *inbox,
2699 struct mlx4_cmd_mailbox *outbox,
2700 struct mlx4_cmd_info *cmd)
2703 int srqn = vhcr->in_modifier;
2704 struct res_mtt *mtt;
2705 struct res_srq *srq;
2706 struct mlx4_srq_context *srqc = inbox->buf;
2707 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2709 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2712 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2715 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2718 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2723 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2727 atomic_inc(&mtt->ref_count);
2729 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2730 res_end_move(dev, slave, RES_SRQ, srqn);
2734 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2736 res_abort_move(dev, slave, RES_SRQ, srqn);
2741 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2742 struct mlx4_vhcr *vhcr,
2743 struct mlx4_cmd_mailbox *inbox,
2744 struct mlx4_cmd_mailbox *outbox,
2745 struct mlx4_cmd_info *cmd)
2748 int srqn = vhcr->in_modifier;
2749 struct res_srq *srq;
2751 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2754 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2757 atomic_dec(&srq->mtt->ref_count);
2759 atomic_dec(&srq->cq->ref_count);
2760 res_end_move(dev, slave, RES_SRQ, srqn);
2765 res_abort_move(dev, slave, RES_SRQ, srqn);
2770 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2771 struct mlx4_vhcr *vhcr,
2772 struct mlx4_cmd_mailbox *inbox,
2773 struct mlx4_cmd_mailbox *outbox,
2774 struct mlx4_cmd_info *cmd)
2777 int srqn = vhcr->in_modifier;
2778 struct res_srq *srq;
2780 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2783 if (srq->com.from_state != RES_SRQ_HW) {
2787 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2789 put_res(dev, slave, srqn, RES_SRQ);
2793 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2794 struct mlx4_vhcr *vhcr,
2795 struct mlx4_cmd_mailbox *inbox,
2796 struct mlx4_cmd_mailbox *outbox,
2797 struct mlx4_cmd_info *cmd)
2800 int srqn = vhcr->in_modifier;
2801 struct res_srq *srq;
2803 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2807 if (srq->com.from_state != RES_SRQ_HW) {
2812 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2814 put_res(dev, slave, srqn, RES_SRQ);
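/*
 * mlx4_GEN_QP_wrapper() is the common path for QP commands that need no
 * special fix-ups: it only checks that the calling slave owns the QP and
 * that the QP is in HW ownership before forwarding the command to firmware.
 */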
2818 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2819 struct mlx4_vhcr *vhcr,
2820 struct mlx4_cmd_mailbox *inbox,
2821 struct mlx4_cmd_mailbox *outbox,
2822 struct mlx4_cmd_info *cmd)
2825 int qpn = vhcr->in_modifier & 0x7fffff;
2828 err = get_res(dev, slave, qpn, RES_QP, &qp);
2831 if (qp->com.from_state != RES_QP_HW) {
2836 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2838 put_res(dev, slave, qpn, RES_QP);
2842 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2843 struct mlx4_vhcr *vhcr,
2844 struct mlx4_cmd_mailbox *inbox,
2845 struct mlx4_cmd_mailbox *outbox,
2846 struct mlx4_cmd_info *cmd)
2848 struct mlx4_qp_context *context = inbox->buf + 8;
2849 adjust_proxy_tun_qkey(dev, vhcr, context);
2850 update_pkey_index(dev, slave, inbox);
2851 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2854 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2855 struct mlx4_vhcr *vhcr,
2856 struct mlx4_cmd_mailbox *inbox,
2857 struct mlx4_cmd_mailbox *outbox,
2858 struct mlx4_cmd_info *cmd)
2861 struct mlx4_qp_context *qpc = inbox->buf + 8;
2862 int qpn = vhcr->in_modifier & 0x7fffff;
2864 u8 orig_sched_queue;
2866 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2870 update_pkey_index(dev, slave, inbox);
2871 update_gid(dev, inbox, (u8)slave);
2872 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2873 orig_sched_queue = qpc->pri_path.sched_queue;
2874 err = update_vport_qp_param(dev, inbox, slave, qpn);
2878 err = get_res(dev, slave, qpn, RES_QP, &qp);
2881 if (qp->com.from_state != RES_QP_HW) {
2886 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2888 /* if no error, save the sched_queue value passed in by the VF. This is
2889 * essentially the QoS value provided by the VF. It will be useful
2890 * if we later allow dynamic changes from VST back to VGT
2893 qp->sched_queue = orig_sched_queue;
2895 put_res(dev, slave, qpn, RES_QP);
2899 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2900 struct mlx4_vhcr *vhcr,
2901 struct mlx4_cmd_mailbox *inbox,
2902 struct mlx4_cmd_mailbox *outbox,
2903 struct mlx4_cmd_info *cmd)
2906 struct mlx4_qp_context *context = inbox->buf + 8;
2908 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2912 update_pkey_index(dev, slave, inbox);
2913 update_gid(dev, inbox, (u8)slave);
2914 adjust_proxy_tun_qkey(dev, vhcr, context);
2915 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2918 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2919 struct mlx4_vhcr *vhcr,
2920 struct mlx4_cmd_mailbox *inbox,
2921 struct mlx4_cmd_mailbox *outbox,
2922 struct mlx4_cmd_info *cmd)
2925 struct mlx4_qp_context *context = inbox->buf + 8;
2927 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2931 update_pkey_index(dev, slave, inbox);
2932 update_gid(dev, inbox, (u8)slave);
2933 adjust_proxy_tun_qkey(dev, vhcr, context);
2934 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2938 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2939 struct mlx4_vhcr *vhcr,
2940 struct mlx4_cmd_mailbox *inbox,
2941 struct mlx4_cmd_mailbox *outbox,
2942 struct mlx4_cmd_info *cmd)
2944 struct mlx4_qp_context *context = inbox->buf + 8;
2945 adjust_proxy_tun_qkey(dev, vhcr, context);
2946 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2949 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2950 struct mlx4_vhcr *vhcr,
2951 struct mlx4_cmd_mailbox *inbox,
2952 struct mlx4_cmd_mailbox *outbox,
2953 struct mlx4_cmd_info *cmd)
2956 struct mlx4_qp_context *context = inbox->buf + 8;
2958 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2962 adjust_proxy_tun_qkey(dev, vhcr, context);
2963 update_gid(dev, inbox, (u8)slave);
2964 update_pkey_index(dev, slave, inbox);
2965 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2968 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2969 struct mlx4_vhcr *vhcr,
2970 struct mlx4_cmd_mailbox *inbox,
2971 struct mlx4_cmd_mailbox *outbox,
2972 struct mlx4_cmd_info *cmd)
2975 struct mlx4_qp_context *context = inbox->buf + 8;
2977 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2981 adjust_proxy_tun_qkey(dev, vhcr, context);
2982 update_gid(dev, inbox, (u8)slave);
2983 update_pkey_index(dev, slave, inbox);
2984 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2987 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2988 struct mlx4_vhcr *vhcr,
2989 struct mlx4_cmd_mailbox *inbox,
2990 struct mlx4_cmd_mailbox *outbox,
2991 struct mlx4_cmd_info *cmd)
2994 int qpn = vhcr->in_modifier & 0x7fffff;
2997 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3000 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3004 atomic_dec(&qp->mtt->ref_count);
3005 atomic_dec(&qp->rcq->ref_count);
3006 atomic_dec(&qp->scq->ref_count);
3008 atomic_dec(&qp->srq->ref_count);
3009 res_end_move(dev, slave, RES_QP, qpn);
3013 res_abort_move(dev, slave, RES_QP, qpn);
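/*
 * Multicast bookkeeping: every gid a slave attaches to a QP is kept on the
 * QP's mcg_list (guarded by mcg_spl) together with its protocol, steering
 * type and DMFS reg_id, so stale attachments can be detached when the QP or
 * the slave is destroyed.
 */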
3018 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3019 struct res_qp *rqp, u8 *gid)
3021 struct res_gid *res;
3023 list_for_each_entry(res, &rqp->mcg_list, list) {
3024 if (!memcmp(res->gid, gid, 16))
3030 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3031 u8 *gid, enum mlx4_protocol prot,
3032 enum mlx4_steer_type steer, u64 reg_id)
3034 struct res_gid *res;
3037 res = kzalloc(sizeof *res, GFP_KERNEL);
3041 spin_lock_irq(&rqp->mcg_spl);
3042 if (find_gid(dev, slave, rqp, gid)) {
3046 memcpy(res->gid, gid, 16);
3049 res->reg_id = reg_id;
3050 list_add_tail(&res->list, &rqp->mcg_list);
3053 spin_unlock_irq(&rqp->mcg_spl);
3058 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3059 u8 *gid, enum mlx4_protocol prot,
3060 enum mlx4_steer_type steer, u64 *reg_id)
3062 struct res_gid *res;
3065 spin_lock_irq(&rqp->mcg_spl);
3066 res = find_gid(dev, slave, rqp, gid);
3067 if (!res || res->prot != prot || res->steer != steer)
3070 *reg_id = res->reg_id;
3071 list_del(&res->list);
3075 spin_unlock_irq(&rqp->mcg_spl);
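/*
 * Steering-mode aware attach/detach helpers: device-managed steering
 * goes through flow rules identified by reg_id, B0 steering through the
 * common QP attach/detach path.
 */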
3080 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3081 int block_loopback, enum mlx4_protocol prot,
3082 enum mlx4_steer_type type, u64 *reg_id)
3084 switch (dev->caps.steering_mode) {
3085 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3086 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3087 block_loopback, prot,
3089 case MLX4_STEERING_MODE_B0:
3090 return mlx4_qp_attach_common(dev, qp, gid,
3091 block_loopback, prot, type);
3097 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3098 enum mlx4_protocol prot, enum mlx4_steer_type type,
3101 switch (dev->caps.steering_mode) {
3102 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3103 return mlx4_flow_detach(dev, reg_id);
3104 case MLX4_STEERING_MODE_B0:
3105 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
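/*
 * Multicast attach/detach command wrapper.  The QP is resolved through
 * the resource tracker, the attach or detach is performed on the
 * slave's behalf, and the GID list on the QP is updated so detach_qp()
 * can later undo any attachments the slave leaves behind.
 */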
3111 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3112 struct mlx4_vhcr *vhcr,
3113 struct mlx4_cmd_mailbox *inbox,
3114 struct mlx4_cmd_mailbox *outbox,
3115 struct mlx4_cmd_info *cmd)
3117 struct mlx4_qp qp; /* dummy for calling attach/detach */
3118 u8 *gid = inbox->buf;
3119 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3124 int attach = vhcr->op_modifier;
3125 int block_loopback = vhcr->in_modifier >> 31;
3126 u8 steer_type_mask = 2;
3127 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3129 qpn = vhcr->in_modifier & 0xffffff;
3130 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3136 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3139 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3142 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3146 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3150 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3152 pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3155 put_res(dev, slave, qpn, RES_QP);
3159 qp_detach(dev, &qp, gid, prot, type, reg_id);
3161 put_res(dev, slave, qpn, RES_QP);
3166 * MAC validation for flow steering rules.
3167 * A VF may attach rules only with a MAC address that is assigned to it.
3169 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3170 struct list_head *rlist)
3172 struct mac_res *res, *tmp;
3175 /* make sure it isn't a multicast or broadcast MAC */
3176 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3177 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3178 list_for_each_entry_safe(res, tmp, rlist, list) {
3179 be_mac = cpu_to_be64(res->mac << 16);
3180 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3183 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3184 eth_header->eth.dst_mac, slave);
3191 * If the eth header is missing, append an eth header with a MAC address
3192 * assigned to the VF.
3194 static int add_eth_header(struct mlx4_dev *dev, int slave,
3195 struct mlx4_cmd_mailbox *inbox,
3196 struct list_head *rlist, int header_id)
3198 struct mac_res *res, *tmp;
3200 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3201 struct mlx4_net_trans_rule_hw_eth *eth_header;
3202 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3203 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3205 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3207 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3209 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3211 /* Clear space in the inbox for the eth header */
3212 switch (header_id) {
3213 case MLX4_NET_TRANS_RULE_ID_IPV4:
3215 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3216 memmove(ip_header, eth_header,
3217 sizeof(*ip_header) + sizeof(*l4_header));
3219 case MLX4_NET_TRANS_RULE_ID_TCP:
3220 case MLX4_NET_TRANS_RULE_ID_UDP:
3221 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3223 memmove(l4_header, eth_header, sizeof(*l4_header));
3228 list_for_each_entry_safe(res, tmp, rlist, list) {
3229 if (port == res->port) {
3230 be_mac = cpu_to_be64(res->mac << 16);
3235 pr_err("Failed adding eth header to FS rule: can't find a matching MAC for port %d\n",
3240 memset(eth_header, 0, sizeof(*eth_header));
3241 eth_header->size = sizeof(*eth_header) >> 2;
3242 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3243 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3244 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
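/*
 * Attach a device-managed flow steering rule on behalf of a slave.  The
 * destination QP must be owned by the slave, Ethernet rules must carry
 * one of the slave's MAC addresses, and rules lacking an L2 header get
 * one added before the command is forwarded to firmware.  The resulting
 * rule id is registered with the resource tracker.
 */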
3250 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3251 struct mlx4_vhcr *vhcr,
3252 struct mlx4_cmd_mailbox *inbox,
3253 struct mlx4_cmd_mailbox *outbox,
3254 struct mlx4_cmd_info *cmd)
3257 struct mlx4_priv *priv = mlx4_priv(dev);
3258 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3259 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3263 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3264 struct _rule_hw *rule_header;
3267 if (dev->caps.steering_mode !=
3268 MLX4_STEERING_MODE_DEVICE_MANAGED)
3271 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3272 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3273 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3275 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3278 rule_header = (struct _rule_hw *)(ctrl + 1);
3279 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3281 switch (header_id) {
3282 case MLX4_NET_TRANS_RULE_ID_ETH:
3283 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3288 case MLX4_NET_TRANS_RULE_ID_IB:
3290 case MLX4_NET_TRANS_RULE_ID_IPV4:
3291 case MLX4_NET_TRANS_RULE_ID_TCP:
3292 case MLX4_NET_TRANS_RULE_ID_UDP:
3293 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3294 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3298 vhcr->in_modifier +=
3299 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3302 pr_err("Corrupted mailbox.\n");
3307 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3308 vhcr->in_modifier, 0,
3309 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3314 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3316 mlx4_err(dev, "Failed to add flow steering resources\n");
3318 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3319 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3323 atomic_inc(&rqp->ref_count);
3325 put_res(dev, slave, qpn, RES_QP);
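/*
 * Detach a device-managed flow steering rule: remove it from the
 * resource tracker, forward the detach to firmware and drop the
 * reference it held on its QP.
 */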
3329 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3330 struct mlx4_vhcr *vhcr,
3331 struct mlx4_cmd_mailbox *inbox,
3332 struct mlx4_cmd_mailbox *outbox,
3333 struct mlx4_cmd_info *cmd)
3337 struct res_fs_rule *rrule;
3339 if (dev->caps.steering_mode !=
3340 MLX4_STEERING_MODE_DEVICE_MANAGED)
3343 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3346 /* Release the rule from busy state before removal */
3347 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3348 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3352 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3354 mlx4_err(dev, "Failed to remove flow steering resources\n");
3358 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3359 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3362 atomic_dec(&rqp->ref_count);
3364 put_res(dev, slave, rrule->qpn, RES_QP);
3369 BUSY_MAX_RETRIES = 10
3372 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3373 struct mlx4_vhcr *vhcr,
3374 struct mlx4_cmd_mailbox *inbox,
3375 struct mlx4_cmd_mailbox *outbox,
3376 struct mlx4_cmd_info *cmd)
3379 int index = vhcr->in_modifier & 0xffff;
3381 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3385 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3386 put_res(dev, slave, index, RES_COUNTER);
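/*
 * Detach any multicast groups still attached to a slave's QP, using
 * whichever steering mode the attachment was created with.
 */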
3390 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3392 struct res_gid *rgid;
3393 struct res_gid *tmp;
3394 struct mlx4_qp qp; /* dummy for calling attach/detach */
3396 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3397 switch (dev->caps.steering_mode) {
3398 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3399 mlx4_flow_detach(dev, rgid->reg_id);
3401 case MLX4_STEERING_MODE_B0:
3402 qp.qpn = rqp->local_qpn;
3403 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3404 rgid->prot, rgid->steer);
3407 list_del(&rgid->list);
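/*
 * Try to claim (mark busy) every resource of the given type owned by
 * the slave.  Returns the number of resources that could not be claimed
 * because they were already busy.
 */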
3412 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3413 enum mlx4_resource type, int print)
3415 struct mlx4_priv *priv = mlx4_priv(dev);
3416 struct mlx4_resource_tracker *tracker =
3417 &priv->mfunc.master.res_tracker;
3418 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3419 struct res_common *r;
3420 struct res_common *tmp;
3424 spin_lock_irq(mlx4_tlock(dev));
3425 list_for_each_entry_safe(r, tmp, rlist, list) {
3426 if (r->owner == slave) {
3428 if (r->state == RES_ANY_BUSY) {
3431 "%s id 0x%llx is busy\n",
3436 r->from_state = r->state;
3437 r->state = RES_ANY_BUSY;
3443 spin_unlock_irq(mlx4_tlock(dev));
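/*
 * Retry _move_all_busy() until all of the slave's resources of this
 * type are claimed, giving up after several seconds and reporting what
 * is still busy.
 */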
3448 static int move_all_busy(struct mlx4_dev *dev, int slave,
3449 enum mlx4_resource type)
3451 unsigned long begin;
3456 busy = _move_all_busy(dev, slave, type, 0);
3457 if (time_after(jiffies, begin + 5 * HZ))
3464 busy = _move_all_busy(dev, slave, type, 1);
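/*
 * The rem_slave_*() functions below tear down what a slave left behind:
 * each claims the slave's resources of one type, walks them back down
 * their state machine (issuing the firmware commands needed to return
 * hardware-owned objects to software ownership) and frees the tracker
 * entries.
 */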
3468 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3470 struct mlx4_priv *priv = mlx4_priv(dev);
3471 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3472 struct list_head *qp_list =
3473 &tracker->slave_list[slave].res_list[RES_QP];
3481 err = move_all_busy(dev, slave, RES_QP);
3483 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3484 "for slave %d\n", slave);
3486 spin_lock_irq(mlx4_tlock(dev));
3487 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3488 spin_unlock_irq(mlx4_tlock(dev));
3489 if (qp->com.owner == slave) {
3490 qpn = qp->com.res_id;
3491 detach_qp(dev, slave, qp);
3492 state = qp->com.from_state;
3493 while (state != 0) {
3495 case RES_QP_RESERVED:
3496 spin_lock_irq(mlx4_tlock(dev));
3497 rb_erase(&qp->com.node,
3498 &tracker->res_tree[RES_QP]);
3499 list_del(&qp->com.list);
3500 spin_unlock_irq(mlx4_tlock(dev));
3505 if (!valid_reserved(dev, slave, qpn))
3506 __mlx4_qp_free_icm(dev, qpn);
3507 state = RES_QP_RESERVED;
3511 err = mlx4_cmd(dev, in_param,
3514 MLX4_CMD_TIME_CLASS_A,
3517 mlx4_dbg(dev, "rem_slave_qps: failed"
3518 " to move slave %d qpn %d to"
3521 atomic_dec(&qp->rcq->ref_count);
3522 atomic_dec(&qp->scq->ref_count);
3523 atomic_dec(&qp->mtt->ref_count);
3525 atomic_dec(&qp->srq->ref_count);
3526 state = RES_QP_MAPPED;
3533 spin_lock_irq(mlx4_tlock(dev));
3535 spin_unlock_irq(mlx4_tlock(dev));
3538 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3540 struct mlx4_priv *priv = mlx4_priv(dev);
3541 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3542 struct list_head *srq_list =
3543 &tracker->slave_list[slave].res_list[RES_SRQ];
3544 struct res_srq *srq;
3545 struct res_srq *tmp;
3552 err = move_all_busy(dev, slave, RES_SRQ);
3554 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3555 "busy for slave %d\n", slave);
3557 spin_lock_irq(mlx4_tlock(dev));
3558 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3559 spin_unlock_irq(mlx4_tlock(dev));
3560 if (srq->com.owner == slave) {
3561 srqn = srq->com.res_id;
3562 state = srq->com.from_state;
3563 while (state != 0) {
3565 case RES_SRQ_ALLOCATED:
3566 __mlx4_srq_free_icm(dev, srqn);
3567 spin_lock_irq(mlx4_tlock(dev));
3568 rb_erase(&srq->com.node,
3569 &tracker->res_tree[RES_SRQ]);
3570 list_del(&srq->com.list);
3571 spin_unlock_irq(mlx4_tlock(dev));
3578 err = mlx4_cmd(dev, in_param, srqn, 1,
3580 MLX4_CMD_TIME_CLASS_A,
3583 mlx4_dbg(dev, "rem_slave_srqs: failed"
3584 " to move slave %d srq %d to"
3588 atomic_dec(&srq->mtt->ref_count);
3590 atomic_dec(&srq->cq->ref_count);
3591 state = RES_SRQ_ALLOCATED;
3599 spin_lock_irq(mlx4_tlock(dev));
3601 spin_unlock_irq(mlx4_tlock(dev));
3604 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3606 struct mlx4_priv *priv = mlx4_priv(dev);
3607 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3608 struct list_head *cq_list =
3609 &tracker->slave_list[slave].res_list[RES_CQ];
3618 err = move_all_busy(dev, slave, RES_CQ);
3620 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3621 "busy for slave %d\n", slave);
3623 spin_lock_irq(mlx4_tlock(dev));
3624 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3625 spin_unlock_irq(mlx4_tlock(dev));
3626 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3627 cqn = cq->com.res_id;
3628 state = cq->com.from_state;
3629 while (state != 0) {
3631 case RES_CQ_ALLOCATED:
3632 __mlx4_cq_free_icm(dev, cqn);
3633 spin_lock_irq(mlx4_tlock(dev));
3634 rb_erase(&cq->com.node,
3635 &tracker->res_tree[RES_CQ]);
3636 list_del(&cq->com.list);
3637 spin_unlock_irq(mlx4_tlock(dev));
3644 err = mlx4_cmd(dev, in_param, cqn, 1,
3646 MLX4_CMD_TIME_CLASS_A,
3649 mlx4_dbg(dev, "rem_slave_cqs: failed"
3650 " to move slave %d cq %d to"
3653 atomic_dec(&cq->mtt->ref_count);
3654 state = RES_CQ_ALLOCATED;
3662 spin_lock_irq(mlx4_tlock(dev));
3664 spin_unlock_irq(mlx4_tlock(dev));
3667 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3669 struct mlx4_priv *priv = mlx4_priv(dev);
3670 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3671 struct list_head *mpt_list =
3672 &tracker->slave_list[slave].res_list[RES_MPT];
3673 struct res_mpt *mpt;
3674 struct res_mpt *tmp;
3681 err = move_all_busy(dev, slave, RES_MPT);
3683 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3684 "busy for slave %d\n", slave);
3686 spin_lock_irq(mlx4_tlock(dev));
3687 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3688 spin_unlock_irq(mlx4_tlock(dev));
3689 if (mpt->com.owner == slave) {
3690 mptn = mpt->com.res_id;
3691 state = mpt->com.from_state;
3692 while (state != 0) {
3694 case RES_MPT_RESERVED:
3695 __mlx4_mpt_release(dev, mpt->key);
3696 spin_lock_irq(mlx4_tlock(dev));
3697 rb_erase(&mpt->com.node,
3698 &tracker->res_tree[RES_MPT]);
3699 list_del(&mpt->com.list);
3700 spin_unlock_irq(mlx4_tlock(dev));
3705 case RES_MPT_MAPPED:
3706 __mlx4_mpt_free_icm(dev, mpt->key);
3707 state = RES_MPT_RESERVED;
3712 err = mlx4_cmd(dev, in_param, mptn, 0,
3714 MLX4_CMD_TIME_CLASS_A,
3717 mlx4_dbg(dev, "rem_slave_mrs: failed"
3718 " to move slave %d mpt %d to"
3722 atomic_dec(&mpt->mtt->ref_count);
3723 state = RES_MPT_MAPPED;
3730 spin_lock_irq(mlx4_tlock(dev));
3732 spin_unlock_irq(mlx4_tlock(dev));
3735 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3737 struct mlx4_priv *priv = mlx4_priv(dev);
3738 struct mlx4_resource_tracker *tracker =
3739 &priv->mfunc.master.res_tracker;
3740 struct list_head *mtt_list =
3741 &tracker->slave_list[slave].res_list[RES_MTT];
3742 struct res_mtt *mtt;
3743 struct res_mtt *tmp;
3749 err = move_all_busy(dev, slave, RES_MTT);
3751 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3752 "busy for slave %d\n", slave);
3754 spin_lock_irq(mlx4_tlock(dev));
3755 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3756 spin_unlock_irq(mlx4_tlock(dev));
3757 if (mtt->com.owner == slave) {
3758 base = mtt->com.res_id;
3759 state = mtt->com.from_state;
3760 while (state != 0) {
3762 case RES_MTT_ALLOCATED:
3763 __mlx4_free_mtt_range(dev, base,
3765 spin_lock_irq(mlx4_tlock(dev));
3766 rb_erase(&mtt->com.node,
3767 &tracker->res_tree[RES_MTT]);
3768 list_del(&mtt->com.list);
3769 spin_unlock_irq(mlx4_tlock(dev));
3779 spin_lock_irq(mlx4_tlock(dev));
3781 spin_unlock_irq(mlx4_tlock(dev));
3784 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3786 struct mlx4_priv *priv = mlx4_priv(dev);
3787 struct mlx4_resource_tracker *tracker =
3788 &priv->mfunc.master.res_tracker;
3789 struct list_head *fs_rule_list =
3790 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3791 struct res_fs_rule *fs_rule;
3792 struct res_fs_rule *tmp;
3797 err = move_all_busy(dev, slave, RES_FS_RULE);
3799 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3802 spin_lock_irq(mlx4_tlock(dev));
3803 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3804 spin_unlock_irq(mlx4_tlock(dev));
3805 if (fs_rule->com.owner == slave) {
3806 base = fs_rule->com.res_id;
3807 state = fs_rule->com.from_state;
3808 while (state != 0) {
3810 case RES_FS_RULE_ALLOCATED:
3812 err = mlx4_cmd(dev, base, 0, 0,
3813 MLX4_QP_FLOW_STEERING_DETACH,
3814 MLX4_CMD_TIME_CLASS_A,
3817 spin_lock_irq(mlx4_tlock(dev));
3818 rb_erase(&fs_rule->com.node,
3819 &tracker->res_tree[RES_FS_RULE]);
3820 list_del(&fs_rule->com.list);
3821 spin_unlock_irq(mlx4_tlock(dev));
3831 spin_lock_irq(mlx4_tlock(dev));
3833 spin_unlock_irq(mlx4_tlock(dev));
3836 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3838 struct mlx4_priv *priv = mlx4_priv(dev);
3839 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3840 struct list_head *eq_list =
3841 &tracker->slave_list[slave].res_list[RES_EQ];
3848 struct mlx4_cmd_mailbox *mailbox;
3850 err = move_all_busy(dev, slave, RES_EQ);
3852 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3853 "busy for slave %d\n", slave);
3855 spin_lock_irq(mlx4_tlock(dev));
3856 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3857 spin_unlock_irq(mlx4_tlock(dev));
3858 if (eq->com.owner == slave) {
3859 eqn = eq->com.res_id;
3860 state = eq->com.from_state;
3861 while (state != 0) {
3863 case RES_EQ_RESERVED:
3864 spin_lock_irq(mlx4_tlock(dev));
3865 rb_erase(&eq->com.node,
3866 &tracker->res_tree[RES_EQ]);
3867 list_del(&eq->com.list);
3868 spin_unlock_irq(mlx4_tlock(dev));
3874 mailbox = mlx4_alloc_cmd_mailbox(dev);
3875 if (IS_ERR(mailbox)) {
3879 err = mlx4_cmd_box(dev, slave, 0,
3882 MLX4_CMD_TIME_CLASS_A,
3885 mlx4_dbg(dev, "rem_slave_eqs: failed"
3886 " to move slave %d eqs %d to"
3887 " SW ownership\n", slave, eqn);
3888 mlx4_free_cmd_mailbox(dev, mailbox);
3889 atomic_dec(&eq->mtt->ref_count);
3890 state = RES_EQ_RESERVED;
3898 spin_lock_irq(mlx4_tlock(dev));
3900 spin_unlock_irq(mlx4_tlock(dev));
3903 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3905 struct mlx4_priv *priv = mlx4_priv(dev);
3906 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3907 struct list_head *counter_list =
3908 &tracker->slave_list[slave].res_list[RES_COUNTER];
3909 struct res_counter *counter;
3910 struct res_counter *tmp;
3914 err = move_all_busy(dev, slave, RES_COUNTER);
3916 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3917 "busy for slave %d\n", slave);
3919 spin_lock_irq(mlx4_tlock(dev));
3920 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3921 if (counter->com.owner == slave) {
3922 index = counter->com.res_id;
3923 rb_erase(&counter->com.node,
3924 &tracker->res_tree[RES_COUNTER]);
3925 list_del(&counter->com.list);
3927 __mlx4_counter_free(dev, index);
3930 spin_unlock_irq(mlx4_tlock(dev));
3933 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3935 struct mlx4_priv *priv = mlx4_priv(dev);
3936 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3937 struct list_head *xrcdn_list =
3938 &tracker->slave_list[slave].res_list[RES_XRCD];
3939 struct res_xrcdn *xrcd;
3940 struct res_xrcdn *tmp;
3944 err = move_all_busy(dev, slave, RES_XRCD);
3946 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3947 "busy for slave %d\n", slave);
3949 spin_lock_irq(mlx4_tlock(dev));
3950 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3951 if (xrcd->com.owner == slave) {
3952 xrcdn = xrcd->com.res_id;
3953 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3954 list_del(&xrcd->com.list);
3956 __mlx4_xrcd_free(dev, xrcdn);
3959 spin_unlock_irq(mlx4_tlock(dev));
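/*
 * Release everything a slave still owns, under its resource-tracker
 * mutex and in dependency order: flow rules and QPs go before the CQs,
 * MRs, EQs and MTTs they reference.
 */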
3962 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3964 struct mlx4_priv *priv = mlx4_priv(dev);
3966 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3968 rem_slave_macs(dev, slave);
3969 rem_slave_fs_rule(dev, slave);
3970 rem_slave_qps(dev, slave);
3971 rem_slave_srqs(dev, slave);
3972 rem_slave_cqs(dev, slave);
3973 rem_slave_mrs(dev, slave);
3974 rem_slave_eqs(dev, slave);
3975 rem_slave_mtts(dev, slave);
3976 rem_slave_counters(dev, slave);
3977 rem_slave_xrcdns(dev, slave);
3978 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
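/*
 * Deferred work that applies an administratively set VLAN/QoS change to
 * all of a VF's Ethernet QPs on the affected port: each eligible QP
 * gets an UPDATE_QP with the new vlan_control, vlan_index and
 * sched_queue values, and the old VLAN index is unregistered once all
 * updates succeed.
 */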
3981 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3983 struct mlx4_vf_immed_vlan_work *work =
3984 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
3985 struct mlx4_cmd_mailbox *mailbox;
3986 struct mlx4_update_qp_context *upd_context;
3987 struct mlx4_dev *dev = &work->priv->dev;
3988 struct mlx4_resource_tracker *tracker =
3989 &work->priv->mfunc.master.res_tracker;
3990 struct list_head *qp_list =
3991 &tracker->slave_list[work->slave].res_list[RES_QP];
3994 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3995 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3996 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3997 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3998 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3999 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4000 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4001 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4004 int port, errors = 0;
4007 if (mlx4_is_slave(dev)) {
4008 mlx4_warn(dev, "Trying to update QP in slave %d\n",
4013 mailbox = mlx4_alloc_cmd_mailbox(dev);
4014 if (IS_ERR(mailbox))
4016 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4017 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4018 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4019 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4020 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4021 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4022 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4023 else if (!work->vlan_id)
4024 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4025 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4027 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4028 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4029 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4031 upd_context = mailbox->buf;
4032 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4033 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4034 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4036 spin_lock_irq(mlx4_tlock(dev));
4037 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4038 spin_unlock_irq(mlx4_tlock(dev));
4039 if (qp->com.owner == work->slave) {
4040 if (qp->com.from_state != RES_QP_HW ||
4041 !qp->sched_queue || /* no INIT2RTR trans yet */
4042 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4043 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4044 spin_lock_irq(mlx4_tlock(dev));
4047 port = (qp->sched_queue >> 6 & 1) + 1;
4048 if (port != work->port) {
4049 spin_lock_irq(mlx4_tlock(dev));
4052 upd_context->qp_context.pri_path.sched_queue =
4053 qp->sched_queue & 0xC7;
4054 upd_context->qp_context.pri_path.sched_queue |=
4055 ((work->qos & 0x7) << 3);
4057 err = mlx4_cmd(dev, mailbox->dma,
4058 qp->local_qpn & 0xffffff,
4059 0, MLX4_CMD_UPDATE_QP,
4060 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4062 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4063 "port %d, qpn %d (%d)\n",
4064 work->slave, port, qp->local_qpn,
4069 spin_lock_irq(mlx4_tlock(dev));
4071 spin_unlock_irq(mlx4_tlock(dev));
4072 mlx4_free_cmd_mailbox(dev, mailbox);
4075 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4076 errors, work->slave, work->port);
4078 /* unregister the previous vlan_id if needed and there were no errors
4079 * while updating the QPs
4081 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4082 NO_INDX != work->orig_vlan_ix)
4083 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4084 work->orig_vlan_ix);