/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head	list;
        struct rb_node		node;
        u64			res_id;
        int			owner;
        int			state;
        int			from_state;
        int			to_state;
};

enum {
        RES_ANY_BUSY = 1,
};

struct res_gid {
        struct list_head	list;
        u8			gid[16];
        enum mlx4_protocol	prot;
        enum mlx4_steer_type	steer;
        u64			reg_id;
};
enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common	com;
        struct res_mtt	       *mtt;
        struct res_cq	       *rcq;
        struct res_cq	       *scq;
        struct res_srq	       *srq;
        struct list_head	mcg_list;
        spinlock_t		mcg_spl;
        int			local_qpn;
        atomic_t		ref_count;
        u32			qpc_flags;
        u8			sched_queue;
};
enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common	com;
        int			order;
        atomic_t		ref_count;
};
enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common	com;
        struct res_mtt	       *mtt;
        int			key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common	com;
        struct res_mtt	       *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common	com;
        struct res_mtt	       *mtt;
        atomic_t		ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common	com;
        struct res_mtt	       *mtt;
        atomic_t		ref_count;
};
enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common	com;
        int			port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common	com;
        int			port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common	com;
        int			qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
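
/*
 * Usage sketch (illustrative, not part of the driver): every tracked
 * object embeds a struct res_common keyed by res_id in a per-type
 * red-black tree, so a hypothetical caller would pair the helpers as:
 *
 *      struct res_common *r = kzalloc(sizeof(*r), GFP_KERNEL);
 *      r->res_id = id;
 *      if (res_tracker_insert(&tracker->res_tree[RES_QP], r))
 *              return -EEXIST;         (duplicate id)
 *      r = res_tracker_lookup(&tracker->res_tree[RES_QP], id);
 *
 * Lookup is O(log n) over all resources of one type, across all slaves.
 */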
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;

        if (slave > dev->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave])
                goto out;

        if (allocated + count <= guaranteed) {
                err = 0;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                if (free - from_free > reserved)
                        err = 0;
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
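
/*
 * Worked example (illustrative numbers): say quota[slave] = 100,
 * guaranteed[slave] = 10, allocated = 8, free = 50, reserved = 40, and
 * a request of count = 5 arrives. allocated + count = 13 exceeds the
 * guarantee, so from_free = 5 - (10 - 8) = 3 must come from the shared
 * pool; the grant succeeds because free - from_free = 47 still exceeds
 * the 40 entries reserved for other functions' guarantees.
 */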
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];

        if (slave > dev->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);
        if (port > 0) {
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
        }

        spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
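
/*
 * Worked example (illustrative): with num_instances = 1200 QPs and
 * dev->num_vfs = 2 (three functions including the PF), each function
 * gets guaranteed = 1200 / (2 * 3) = 200 and quota = 600 + 200 = 800.
 * Half the pool is split into hard guarantees; the other half is
 * shared on demand, capped per function by the quota.
 */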
void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0 ; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->num_vfs + 1) * sizeof(int),
                                                       GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < MLX4_MAX_PORTS; j++)
                                        res_alloc->res_port_rsvd[j] +=
                                                res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}
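
/*
 * A note on the magic offsets above (a reading of the mailbox layout,
 * consistent with how sched_queue is used elsewhere in this file): the
 * modify-QP mailbox starts with the optional-parameter mask, followed
 * by the QP context, so byte 35 lands on the pkey index and bit 6 of
 * byte 64 (sched_queue) selects the physical port. The per-slave
 * virt2phys_pkey table rewrites the guest's virtual pkey index into
 * the real one before the command reaches firmware.
 */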
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context	*qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}
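
/*
 * A plain-language reading of the three vlan_control cases above: a
 * disabled link blocks every frame class in both directions; a nonzero
 * default vlan (VST mode) blocks guest-tagged TX and admits only
 * correctly tagged RX; default vlan 0 admits priority-tagged traffic
 * by blocking only fully tagged frames.
 */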
static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}
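
/*
 * num_mpts is a power of two, so mpt_mask() turns an MPT index that
 * carries key bits in its high part into a table slot. Illustrative
 * numbers: with num_mpts = 0x20000, index 0x45003 & 0x1ffff = 0x5003.
 * This is why callers below compute "id = index & mpt_mask(dev)"
 * before consulting the tracker.
 */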
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}
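
/*
 * get_res()/put_res() implement a simple busy-bit lock per resource:
 * get_res() parks the current state in from_state and marks the entry
 * RES_ANY_BUSY so concurrent verbs back off with -EBUSY; put_res()
 * restores from_state. Every command wrapper below brackets its
 * firmware call with this pair.
 */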
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;

        return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                printk(KERN_ERR "implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i)
                rb_erase(&res_arr[i]->node, root);

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
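
/*
 * add_res_range() is all-or-nothing: tracker entries are pre-allocated
 * outside the lock, then inserted under it; a duplicate id rolls back
 * every insertion made so far before returning -EEXIST.
 */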
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }
                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
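
/*
 * State-machine sketch for a slave QP, as driven by this helper and
 * the alloc/free paths below (BUSY is the transient locked state):
 *
 *      RESERVED --MAP_ICM--> MAPPED --RST2INIT--> HW
 *      RESERVED <-FREE_ICM-- MAPPED <---2RST----- HW
 *
 * res_end_move() commits to_state; res_abort_move() restores
 * from_state when the firmware command fails.
 */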
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
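
/*
 * Two different notions of "reserved" meet here: valid_reserved() asks
 * whether a QPN belongs to the special/proxy/tunnel range a slave may
 * legitimately touch, while fw_reserved() asks whether the QPN lies in
 * the firmware-owned region whose ICM must never be allocated or freed
 * from a wrapper.
 */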
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
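
/*
 * Flow sketch: a VF allocating a QP issues the ALLOC_RES command
 * twice. RES_OP_RESERVE carries count/align in in_param and returns
 * the base QPN in out_param; RES_OP_MAP_ICM then maps ICM for one QPN
 * at a time. Quota accounting happens only at the reserve step.
 */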
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;

        order = get_param_l(&in_param);

        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
        if (err)
                return err;

        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                return -ENOMEM;
        }

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        } else {
                set_param_l(out_param, base);
        }

        return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
                if (err)
                        break;

                index = __mlx4_mpt_reserve(dev);
                if (index == -1) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        break;
                }
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
                if (err)
                        break;

                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        break;
                }

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                return -ENOMEM;
        }
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        mlx4_release_resource(dev, slave, RES_MAC, 1, port);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
                kfree(res);
        }
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int in_port)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = !in_port ? get_param_l(out_param) : in_port;
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                             int port, int vlan_index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        /* vlan found. update ref count */
                        ++res->ref_count;
                        return 0;
                }
        }

        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
                return -EINVAL;
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res) {
                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
                return -ENOMEM;
        }
        res->vlan = vlan;
        res->port = (u8) port;
        res->vlan_index = vlan_index;
        res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_VLAN]);
        return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
                                int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                if (res->vlan == vlan && res->port == (u8) port) {
                        if (!--res->ref_count) {
                                list_del(&res->list);
                                mlx4_release_resource(dev, slave, RES_VLAN,
                                                      1, port);
                                kfree(res);
                        }
                        break;
                }
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *vlan_list =
                &tracker->slave_list[slave].res_list[RES_VLAN];
        struct vlan_res *res, *tmp;
        int i;

        list_for_each_entry_safe(res, tmp, vlan_list, list) {
                list_del(&res->list);
                /* dereference the vlan the number of times the slave referenced it */
                for (i = 0; i < res->ref_count; i++)
                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
                kfree(res);
        }
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param, int in_port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err;
        u16 vlan;
        int vlan_index;
        int port;

        port = !in_port ? get_param_l(out_param) : in_port;

        if (!port || op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;

        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
                slave_state[slave].old_vlan_api = true;
                return 0;
        }

        vlan = (u16) in_param;

        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
        if (!err) {
                set_param_l(out_param, (u32) vlan_index);
                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
                if (err)
                        __mlx4_unregister_vlan(dev, port, vlan);
        }
        return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
        if (err)
                return err;

        err = __mlx4_counter_alloc(dev, &index);
        if (err) {
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
                return err;
        }

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err) {
                __mlx4_counter_free(dev, index);
                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
        } else {
                set_param_l(out_param, index);
        }

        return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param,
                                     (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
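
/*
 * Dispatch layout, as used above: bits 7:0 of vhcr->in_modifier carry
 * the resource type and bits 15:8 carry the port for MAC/VLAN
 * requests, while vhcr->op_modifier selects the operation
 * (RES_OP_RESERVE, RES_OP_MAP_ICM or RES_OP_RESERVE_AND_MAP). For
 * example, assuming RES_MAC enumerates to 5, in_modifier 0x0105 would
 * mean "MAC resource, port 1".
 */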
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_QP, count, 0);
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        }
        return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                cqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
                __mlx4_cq_free_icm(dev, cqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                srqn = get_param_l(&in_param);
                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err)
                        break;

                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
                __mlx4_srq_free_icm(dev, srqn);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param, int in_port)
{
        int port;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = !in_port ? get_param_l(out_param) : in_port;
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err = 0;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                if (slave_state[slave].old_vlan_api)
                        return 0;
                if (!port)
                        return -EINVAL;
                vlan_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_vlan(dev, port, in_param);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                            u64 in_param, u64 *out_param)
{
        int index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        index = get_param_l(&in_param);
        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                return err;

        __mlx4_counter_free(dev, index);
        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

        return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        int xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        xrcdn = get_param_l(&in_param);
        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                return err;

        __mlx4_xrcd_free(dev, xrcdn);

        return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err = -EINVAL;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier & 0xFF) {
        case RES_QP:
                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param);
                break;

        case RES_MTT:
                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param);
                break;

        case RES_CQ:
                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
                                  vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param,
                                   (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_VLAN:
                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param,
                                    (vhcr->in_modifier >> 8) & 0xFF);
                break;

        case RES_COUNTER:
                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
                                       vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        default:
                break;
        }
        return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
        return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
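
/*
 * These accessors decode big-endian MPT entry fields in place: bit 9
 * of "flags" marks a physical MPT, the low 24 bits of "pd_flags" hold
 * the PD (whose bits 23:17 encode the owning function, checked in
 * mlx4_SW2HW_MPT_wrapper below), and mtt_addr is 8-byte aligned, so
 * the low three bits are masked off.
 */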
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
        int page_shift = (qpc->log_page_size & 0x3f) + 12;
        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
        int log_sq_stride = qpc->sq_size_stride & 7;
        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
        int log_rq_stride = qpc->rq_size_stride & 7;
        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
        int sq_size;
        int rq_size;
        int total_mem;
        int total_pages;
        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
        total_mem = sq_size + rq_size;
        total_pages =
                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
                                   page_shift);

        return total_pages;
}
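
/*
 * Worked example (illustrative): log_sq_size = 6 and log_sq_stride = 2
 * give sq_size = 1 << (6 + 2 + 4) = 4096 bytes; with an SRQ attached
 * rq_size = 0, so with page_shift = 12 and page_offset = 0 the QP
 * needs roundup_pow_of_two(4096 >> 12) = 1 MTT page.
 */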
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
{
        int res_start = mtt->com.res_id;
        int res_size = (1 << mtt->order);

        if (start < res_start || start + size > res_start + res_size)
                return -EPERM;
        return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mtt *mtt;
        struct res_mpt *mpt;
        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
        int phys;
        int id;
        u32 pd;
        int pd_slave;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
        if (err)
                return err;

        /* Disable memory windows for VFs. */
        if (!mr_is_region(inbox->buf)) {
                err = -EPERM;
                goto ex_abort;
        }

        /* Make sure that the PD bits related to the slave id are zeros. */
        pd = mr_get_pd(inbox->buf);
        pd_slave = (pd >> 17) & 0x7f;
        if (pd_slave != 0 && pd_slave != slave) {
                err = -EPERM;
                goto ex_abort;
        }

        if (mr_is_fmr(inbox->buf)) {
                /* FMR and Bind Enable are forbidden in slave devices. */
                if (mr_is_bind_enabled(inbox->buf)) {
                        err = -EPERM;
                        goto ex_abort;
                }
                /* FMR and Memory Windows are also forbidden. */
                if (!mr_is_region(inbox->buf)) {
                        err = -EPERM;
                        goto ex_abort;
                }
        }

        phys = mr_phys_mpt(inbox->buf);
        if (!phys) {
                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
                if (err)
                        goto ex_abort;

                err = check_mtt_range(dev, slave, mtt_base,
                                      mr_get_mtt_size(inbox->buf), mtt);
                if (err)
                        goto ex_put;

                mpt->mtt = mtt;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;

        if (!phys) {
                atomic_inc(&mtt->ref_count);
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
        }

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_put:
        if (!phys)
                put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}
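
/*
 * This is the canonical wrapper shape used throughout the file:
 * (1) move the tracked resource into its transient BUSY state,
 * (2) validate everything the untrusted slave put in the mailbox,
 * (3) forward the command to firmware via mlx4_DMA_wrapper(), and
 * (4) commit the move on success or abort it on any failure.
 */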
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;
        int id;

        id = index & mpt_mask(dev);
        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
        if (err)
                return err;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_abort;

        if (mpt->mtt)
                atomic_dec(&mpt->mtt->ref_count);

        res_end_move(dev, slave, RES_MPT, id);
        return 0;

ex_abort:
        res_abort_move(dev, slave, RES_MPT, id);

        return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int index = vhcr->in_modifier;
        struct res_mpt *mpt;
        int id;

        id = index & mpt_mask(dev);
        err = get_res(dev, slave, id, RES_MPT, &mpt);
        if (err)
                return err;

        if (mpt->com.from_state != RES_MPT_HW) {
                err = -EBUSY;
                goto out;
        }

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
        put_res(dev, slave, id, RES_MPT);
        return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
                                  struct mlx4_qp_context *context)
{
        u32 qpn = vhcr->in_modifier & 0xffffff;
        u32 qkey = 0;

        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
                return;

        /* adjust qkey in qp context */
        context->qkey = cpu_to_be32(qkey);
}
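
/*
 * Note that qp_get_srqn() deliberately keeps 25 bits: bit 24 is the
 * "SRQ in use" flag and the low 24 bits are the SRQ number, which is
 * why mlx4_RST2INIT_QP_wrapper below splits its result into srqn and
 * use_srq.
 */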
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
                             struct mlx4_cmd_mailbox *outbox,
                             struct mlx4_cmd_info *cmd)
{
        int err;
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_mtt *mtt;
        struct res_qp *qp;
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
        int mtt_size = qp_get_mtt_size(qpc);
        struct res_cq *rcq;
        struct res_cq *scq;
        int rcqn = qp_get_rcqn(qpc);
        int scqn = qp_get_scqn(qpc);
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
        if (err)
                return err;
        qp->local_qpn = local_qpn;
        qp->sched_queue = 0;
        qp->qpc_flags = be32_to_cpu(qpc->flags);

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto ex_abort;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto ex_put_mtt;

        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
        if (err)
                goto ex_put_mtt;

        if (scqn != rcqn) {
                err = get_res(dev, slave, scqn, RES_CQ, &scq);
                if (err)
                        goto ex_put_rcq;
        } else
                scq = rcq;

        if (use_srq) {
                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
                if (err)
                        goto ex_put_scq;
        }

        adjust_proxy_tun_qkey(dev, vhcr, qpc);
        update_pkey_index(dev, slave, inbox);
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_srq;
        atomic_inc(&mtt->ref_count);
        qp->mtt = mtt;
        atomic_inc(&rcq->ref_count);
        qp->rcq = rcq;
        atomic_inc(&scq->ref_count);
        qp->scq = scq;

        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);

        if (use_srq) {
                atomic_inc(&srq->ref_count);
                put_res(dev, slave, srqn, RES_SRQ);
                qp->srq = srq;
        }
        put_res(dev, slave, rcqn, RES_CQ);
        put_res(dev, slave, mtt_base, RES_MTT);
        res_end_move(dev, slave, RES_QP, qpn);

        return 0;

ex_put_srq:
        if (use_srq)
                put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
        if (scqn != rcqn)
                put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
        put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
        put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_QP, qpn);

        return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
        int log_eq_size = eqc->log_eq_size & 0x1f;
        int page_shift = (eqc->log_page_size & 0x3f) + 12;

        if (log_eq_size + 5 < page_shift)
                return 1;

        return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
        int page_shift = (cqc->log_page_size & 0x3f) + 12;

        if (log_cq_size + 5 < page_shift)
                return 1;

        return 1 << (log_cq_size + 5 - page_shift);
}
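
/*
 * The "+ 5" is the log2 of a 32-byte EQE/CQE. Worked example
 * (illustrative): a CQ with log_cq_size = 10 holds 1024 * 32 = 32 KB
 * of CQEs; with 4 KB pages (page_shift = 12) that is
 * 1 << (10 + 5 - 12) = 8 MTT entries, and anything smaller than one
 * page still consumes a single entry.
 */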
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int err;
        int eqn = vhcr->in_modifier;
        int res_id = (slave << 8) | eqn;
        struct mlx4_eq_context *eqc = inbox->buf;
        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
        int mtt_size = eq_get_mtt_size(eqc);
        struct res_eq *eq;
        struct res_mtt *mtt;

        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        if (err)
                return err;
        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
        if (err)
                goto out_add;

        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
        if (err)
                goto out_move;

        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
        if (err)
                goto out_put;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto out_put;

        atomic_inc(&mtt->ref_count);
        eq->mtt = mtt;
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        return 0;

out_put:
        put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
        res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
        return err;
}
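
/*
 * EQ numbers are virtualized per slave, so the tracker key packs the
 * owner into the id: res_id = (slave << 8) | eqn. Slave 3's EQ 2 is
 * tracked as 0x302 and cannot collide with any other function's EQs.
 */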
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
                              int len, struct res_mtt **res)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mtt *mtt;
        int err = -EINVAL;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
                            com.list) {
                if (!check_mtt_range(dev, slave, start, len, mtt)) {
                        *res = mtt;
                        mtt->com.from_state = mtt->com.state;
                        mtt->com.state = RES_MTT_BUSY;
                        err = 0;
                        break;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int verify_qp_parameters(struct mlx4_dev *dev,
                                struct mlx4_cmd_mailbox *inbox,
                                enum qp_transition transition, u8 slave)
{
        u32			qp_type;
        struct mlx4_qp_context	*qp_ctx;
        enum mlx4_qp_optpar	optpar;

        qp_ctx  = inbox->buf + 8;
        qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        optpar	= be32_to_cpu(*(__be32 *) inbox->buf);

        switch (qp_type) {
        case MLX4_QP_ST_RC:
        case MLX4_QP_ST_UC:
                switch (transition) {
                case QP_TRANS_INIT2RTR:
                case QP_TRANS_RTR2RTS:
                case QP_TRANS_RTS2RTS:
                case QP_TRANS_SQD2SQD:
                case QP_TRANS_SQD2RTS:
                        if (slave != mlx4_master_func_num(dev)) {
                                /* slaves have only gid index 0 */
                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                                        if (qp_ctx->pri_path.mgid_index)
                                                return -EINVAL;
                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                                        if (qp_ctx->alt_path.mgid_index)
                                                return -EINVAL;
                        }
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        struct mlx4_mtt mtt;
        __be64 *page_list = inbox->buf;
        u64 *pg_list = (u64 *)page_list;
        int i;
        struct res_mtt *rmtt = NULL;
        int start = be64_to_cpu(page_list[0]);
        int npages = vhcr->in_modifier;
        int err;

        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
        if (err)
                return err;

        /* Call the SW implementation of write_mtt:
         * - Prepare a dummy mtt struct
         * - Translate inbox contents to simple addresses in host endianness
         */
        mtt.offset = 0;  /* TBD this is broken but I don't handle it since
                            we don't really use it */
        mtt.order = 0;
        mtt.page_shift = 0;
        for (i = 0; i < npages; ++i)
                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
                               ((u64 *)page_list + 2));

        if (rmtt)
                put_res(dev, slave, rmtt->com.res_id, RES_MTT);

        return err;
}
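
/*
 * WRITE_MTT mailbox layout, as consumed above: entry 0 is the starting
 * MTT index, entry 1 is reserved, and entries 2..npages+1 carry the
 * DMA addresses, whose present bit (bit 0) is cleared before the list
 * is handed to __mlx4_write_mtt().
 */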
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_vhcr *vhcr,
                          struct mlx4_cmd_mailbox *inbox,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
{
        int eqn = vhcr->in_modifier;
        int res_id = eqn | (slave << 8);
        struct res_eq *eq;
        int err;

        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
        if (err)
                return err;

        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
        if (err)
                goto ex_abort;

        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;

        atomic_dec(&eq->mtt->ref_count);
        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
        res_end_move(dev, slave, RES_EQ, res_id);
        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

        return 0;

ex_put:
        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
        res_abort_move(dev, slave, RES_EQ, res_id);

        return err;
}
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq;
        struct mlx4_cmd_mailbox *mailbox;
        u32 in_modifier = 0;
        int err;
        int res_id;
        struct res_eq *req;

        if (!priv->mfunc.master.slave_state)
                return -EINVAL;

        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

        /* Create the event only if the slave is registered */
        if (event_eq->eqn < 0)
                return 0;

        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        res_id = (slave << 8) | event_eq->eqn;
        err = get_res(dev, slave, res_id, RES_EQ, &req);
        if (err)
                goto unlock;

        if (req->com.from_state != RES_EQ_HW) {
                err = -EINVAL;
                goto put;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto put;
        }

        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
                ++event_eq->token;
                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
        }

        memcpy(mailbox->buf, (u8 *) eqe, 28);

        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_NATIVE);

        put_res(dev, slave, res_id, RES_EQ);
        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;

put:
        put_res(dev, slave, res_id, RES_EQ);

unlock:
        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
        return err;
}
2925 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2926 struct mlx4_vhcr *vhcr,
2927 struct mlx4_cmd_mailbox *inbox,
2928 struct mlx4_cmd_mailbox *outbox,
2929 struct mlx4_cmd_info *cmd)
2931 int eqn = vhcr->in_modifier;
2932 int res_id = eqn | (slave << 8);
2936 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2940 if (eq->com.from_state != RES_EQ_HW) {
2945 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2948 put_res(dev, slave, res_id, RES_EQ);
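/*
 * SW2HW_CQ: before handing the CQ to firmware, validate that the MTT range
 * referenced by the CQ context belongs to this slave and is large enough
 * (check_mtt_range), then take a reference on the MTT so it cannot be
 * freed while the CQ remains in hardware ownership.
 */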
2952 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2953 struct mlx4_vhcr *vhcr,
2954 struct mlx4_cmd_mailbox *inbox,
2955 struct mlx4_cmd_mailbox *outbox,
2956 struct mlx4_cmd_info *cmd)
2959 int cqn = vhcr->in_modifier;
2960 struct mlx4_cq_context *cqc = inbox->buf;
2961 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2963 struct res_mtt *mtt;
2965 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2968 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2971 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2974 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2977 atomic_inc(&mtt->ref_count);
2979 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2980 res_end_move(dev, slave, RES_CQ, cqn);
2984 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2986 res_abort_move(dev, slave, RES_CQ, cqn);
2990 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2991 struct mlx4_vhcr *vhcr,
2992 struct mlx4_cmd_mailbox *inbox,
2993 struct mlx4_cmd_mailbox *outbox,
2994 struct mlx4_cmd_info *cmd)
2997 int cqn = vhcr->in_modifier;
3000 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3003 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3006 atomic_dec(&cq->mtt->ref_count);
3007 res_end_move(dev, slave, RES_CQ, cqn);
3011 res_abort_move(dev, slave, RES_CQ, cqn);
3015 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3016 struct mlx4_vhcr *vhcr,
3017 struct mlx4_cmd_mailbox *inbox,
3018 struct mlx4_cmd_mailbox *outbox,
3019 struct mlx4_cmd_info *cmd)
3021 int cqn = vhcr->in_modifier;
3025 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3029 if (cq->com.from_state != RES_CQ_HW)
3032 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3034 put_res(dev, slave, cqn, RES_CQ);
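/*
 * CQ resize: the new CQ context points at a new MTT range. Validate the
 * new range and, on success, swap the MTT references - drop the reference
 * held on the original MTT and take one on the new MTT.
 */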
3039 static int handle_resize(struct mlx4_dev *dev, int slave,
3040 struct mlx4_vhcr *vhcr,
3041 struct mlx4_cmd_mailbox *inbox,
3042 struct mlx4_cmd_mailbox *outbox,
3043 struct mlx4_cmd_info *cmd,
3047 struct res_mtt *orig_mtt;
3048 struct res_mtt *mtt;
3049 struct mlx4_cq_context *cqc = inbox->buf;
3050 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3052 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3056 if (orig_mtt != cq->mtt) {
3061 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3065 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3068 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3071 atomic_dec(&orig_mtt->ref_count);
3072 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3073 atomic_inc(&mtt->ref_count);
3075 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3079 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3081 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3087 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3088 struct mlx4_vhcr *vhcr,
3089 struct mlx4_cmd_mailbox *inbox,
3090 struct mlx4_cmd_mailbox *outbox,
3091 struct mlx4_cmd_info *cmd)
3093 int cqn = vhcr->in_modifier;
3097 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3101 if (cq->com.from_state != RES_CQ_HW)
3104 if (vhcr->op_modifier == 0) {
3105 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3109 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3111 put_res(dev, slave, cqn, RES_CQ);
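/*
 * Number of MTT entries spanned by an SRQ buffer: the buffer holds
 * 2^log_srq_size WQEs of 2^(log_rq_stride + 4) bytes each, and one MTT
 * entry covers one page, so the count is the buffer size divided by the
 * page size: 1 << (log_srq_size + log_rq_stride + 4 - page_shift).
 */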
3116 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3118 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3119 int log_rq_stride = srqc->logstride & 7;
3120 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3122 if (log_srq_size + log_rq_stride + 4 < page_shift)
3125 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3128 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3129 struct mlx4_vhcr *vhcr,
3130 struct mlx4_cmd_mailbox *inbox,
3131 struct mlx4_cmd_mailbox *outbox,
3132 struct mlx4_cmd_info *cmd)
3135 int srqn = vhcr->in_modifier;
3136 struct res_mtt *mtt;
3137 struct res_srq *srq;
3138 struct mlx4_srq_context *srqc = inbox->buf;
3139 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3141 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3144 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3147 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3150 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3155 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3159 atomic_inc(&mtt->ref_count);
3161 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3162 res_end_move(dev, slave, RES_SRQ, srqn);
3166 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3168 res_abort_move(dev, slave, RES_SRQ, srqn);
3173 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3174 struct mlx4_vhcr *vhcr,
3175 struct mlx4_cmd_mailbox *inbox,
3176 struct mlx4_cmd_mailbox *outbox,
3177 struct mlx4_cmd_info *cmd)
3180 int srqn = vhcr->in_modifier;
3181 struct res_srq *srq;
3183 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3186 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3189 atomic_dec(&srq->mtt->ref_count);
3191 atomic_dec(&srq->cq->ref_count);
3192 res_end_move(dev, slave, RES_SRQ, srqn);
3197 res_abort_move(dev, slave, RES_SRQ, srqn);
3202 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3203 struct mlx4_vhcr *vhcr,
3204 struct mlx4_cmd_mailbox *inbox,
3205 struct mlx4_cmd_mailbox *outbox,
3206 struct mlx4_cmd_info *cmd)
3209 int srqn = vhcr->in_modifier;
3210 struct res_srq *srq;
3212 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3215 if (srq->com.from_state != RES_SRQ_HW) {
3219 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3221 put_res(dev, slave, srqn, RES_SRQ);
3225 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3226 struct mlx4_vhcr *vhcr,
3227 struct mlx4_cmd_mailbox *inbox,
3228 struct mlx4_cmd_mailbox *outbox,
3229 struct mlx4_cmd_info *cmd)
3232 int srqn = vhcr->in_modifier;
3233 struct res_srq *srq;
3235 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3239 if (srq->com.from_state != RES_SRQ_HW) {
3244 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3246 put_res(dev, slave, srqn, RES_SRQ);
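/*
 * Generic QP command wrapper: forward the command to firmware only if the
 * QP is currently in hardware ownership (RES_QP_HW); the tracker entry is
 * held busy via get_res()/put_res() for the duration of the call.
 */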
3250 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3251 struct mlx4_vhcr *vhcr,
3252 struct mlx4_cmd_mailbox *inbox,
3253 struct mlx4_cmd_mailbox *outbox,
3254 struct mlx4_cmd_info *cmd)
3257 int qpn = vhcr->in_modifier & 0x7fffff;
3260 err = get_res(dev, slave, qpn, RES_QP, &qp);
3263 if (qp->com.from_state != RES_QP_HW) {
3268 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3270 put_res(dev, slave, qpn, RES_QP);
3274 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3275 struct mlx4_vhcr *vhcr,
3276 struct mlx4_cmd_mailbox *inbox,
3277 struct mlx4_cmd_mailbox *outbox,
3278 struct mlx4_cmd_info *cmd)
3280 struct mlx4_qp_context *context = inbox->buf + 8;
3281 adjust_proxy_tun_qkey(dev, vhcr, context);
3282 update_pkey_index(dev, slave, inbox);
3283 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
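/*
 * INIT2RTR carries the path information, so beyond the generic checks it
 * verifies the QP parameters, rewrites the pkey index and GID to the
 * slave's view, and applies per-vport overrides (e.g. VST VLAN and QoS
 * settings) via update_vport_qp_param().
 */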
3286 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3287 struct mlx4_vhcr *vhcr,
3288 struct mlx4_cmd_mailbox *inbox,
3289 struct mlx4_cmd_mailbox *outbox,
3290 struct mlx4_cmd_info *cmd)
3293 struct mlx4_qp_context *qpc = inbox->buf + 8;
3294 int qpn = vhcr->in_modifier & 0x7fffff;
3296 u8 orig_sched_queue;
3298 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3302 update_pkey_index(dev, slave, inbox);
3303 update_gid(dev, inbox, (u8)slave);
3304 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3305 orig_sched_queue = qpc->pri_path.sched_queue;
3306 err = update_vport_qp_param(dev, inbox, slave, qpn);
3310 err = get_res(dev, slave, qpn, RES_QP, &qp);
3313 if (qp->com.from_state != RES_QP_HW) {
3318 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3320 /* if no error, save the sched queue value passed in by the VF. This is
3321 * essentially the QoS value provided by the VF, and will be useful
3322 * if we allow dynamic changes from VST back to VGT
3325 qp->sched_queue = orig_sched_queue;
3327 put_res(dev, slave, qpn, RES_QP);
3331 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3332 struct mlx4_vhcr *vhcr,
3333 struct mlx4_cmd_mailbox *inbox,
3334 struct mlx4_cmd_mailbox *outbox,
3335 struct mlx4_cmd_info *cmd)
3338 struct mlx4_qp_context *context = inbox->buf + 8;
3340 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3344 update_pkey_index(dev, slave, inbox);
3345 update_gid(dev, inbox, (u8)slave);
3346 adjust_proxy_tun_qkey(dev, vhcr, context);
3347 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3350 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3357 struct mlx4_qp_context *context = inbox->buf + 8;
3359 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3363 update_pkey_index(dev, slave, inbox);
3364 update_gid(dev, inbox, (u8)slave);
3365 adjust_proxy_tun_qkey(dev, vhcr, context);
3366 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3370 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3371 struct mlx4_vhcr *vhcr,
3372 struct mlx4_cmd_mailbox *inbox,
3373 struct mlx4_cmd_mailbox *outbox,
3374 struct mlx4_cmd_info *cmd)
3376 struct mlx4_qp_context *context = inbox->buf + 8;
3377 adjust_proxy_tun_qkey(dev, vhcr, context);
3378 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3381 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3382 struct mlx4_vhcr *vhcr,
3383 struct mlx4_cmd_mailbox *inbox,
3384 struct mlx4_cmd_mailbox *outbox,
3385 struct mlx4_cmd_info *cmd)
3388 struct mlx4_qp_context *context = inbox->buf + 8;
3390 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3394 adjust_proxy_tun_qkey(dev, vhcr, context);
3395 update_gid(dev, inbox, (u8)slave);
3396 update_pkey_index(dev, slave, inbox);
3397 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3400 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3401 struct mlx4_vhcr *vhcr,
3402 struct mlx4_cmd_mailbox *inbox,
3403 struct mlx4_cmd_mailbox *outbox,
3404 struct mlx4_cmd_info *cmd)
3407 struct mlx4_qp_context *context = inbox->buf + 8;
3409 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3413 adjust_proxy_tun_qkey(dev, vhcr, context);
3414 update_gid(dev, inbox, (u8)slave);
3415 update_pkey_index(dev, slave, inbox);
3416 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3419 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3420 struct mlx4_vhcr *vhcr,
3421 struct mlx4_cmd_mailbox *inbox,
3422 struct mlx4_cmd_mailbox *outbox,
3423 struct mlx4_cmd_info *cmd)
3426 int qpn = vhcr->in_modifier & 0x7fffff;
3429 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3432 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3436 atomic_dec(&qp->mtt->ref_count);
3437 atomic_dec(&qp->rcq->ref_count);
3438 atomic_dec(&qp->scq->ref_count);
3440 atomic_dec(&qp->srq->ref_count);
3441 res_end_move(dev, slave, RES_QP, qpn);
3445 res_abort_move(dev, slave, RES_QP, qpn);
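/*
 * Multicast attachments are tracked per QP on rqp->mcg_list so they can be
 * detached automatically when the slave is cleaned up; find_gid() looks up
 * an existing attachment by its 16-byte GID.
 */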
3450 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3451 struct res_qp *rqp, u8 *gid)
3453 struct res_gid *res;
3455 list_for_each_entry(res, &rqp->mcg_list, list) {
3456 if (!memcmp(res->gid, gid, 16))
3462 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3463 u8 *gid, enum mlx4_protocol prot,
3464 enum mlx4_steer_type steer, u64 reg_id)
3466 struct res_gid *res;
3469 res = kzalloc(sizeof *res, GFP_KERNEL);
3473 spin_lock_irq(&rqp->mcg_spl);
3474 if (find_gid(dev, slave, rqp, gid)) {
3478 memcpy(res->gid, gid, 16);
3481 res->reg_id = reg_id;
3482 list_add_tail(&res->list, &rqp->mcg_list);
3485 spin_unlock_irq(&rqp->mcg_spl);
3490 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3491 u8 *gid, enum mlx4_protocol prot,
3492 enum mlx4_steer_type steer, u64 *reg_id)
3494 struct res_gid *res;
3497 spin_lock_irq(&rqp->mcg_spl);
3498 res = find_gid(dev, slave, rqp, gid);
3499 if (!res || res->prot != prot || res->steer != steer)
3502 *reg_id = res->reg_id;
3503 list_del(&res->list);
3507 spin_unlock_irq(&rqp->mcg_spl);
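/*
 * Attach/detach a QP to a multicast group using whichever steering mode
 * the device is in: device-managed flow steering works with flow rules
 * (identified by reg_id), while B0 steering attaches the QP directly.
 */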
3512 static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3513 int block_loopback, enum mlx4_protocol prot,
3514 enum mlx4_steer_type type, u64 *reg_id)
3516 switch (dev->caps.steering_mode) {
3517 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3518 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3519 block_loopback, prot,
3521 case MLX4_STEERING_MODE_B0:
3522 return mlx4_qp_attach_common(dev, qp, gid,
3523 block_loopback, prot, type);
3529 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3530 enum mlx4_protocol prot, enum mlx4_steer_type type,
3533 switch (dev->caps.steering_mode) {
3534 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3535 return mlx4_flow_detach(dev, reg_id);
3536 case MLX4_STEERING_MODE_B0:
3537 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3543 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3544 struct mlx4_vhcr *vhcr,
3545 struct mlx4_cmd_mailbox *inbox,
3546 struct mlx4_cmd_mailbox *outbox,
3547 struct mlx4_cmd_info *cmd)
3549 struct mlx4_qp qp; /* dummy for calling attach/detach */
3550 u8 *gid = inbox->buf;
3551 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3556 int attach = vhcr->op_modifier;
3557 int block_loopback = vhcr->in_modifier >> 31;
3558 u8 steer_type_mask = 2;
3559 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3561 qpn = vhcr->in_modifier & 0xffffff;
3562 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3568 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3571 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3574 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3578 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3582 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3584 pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3587 put_res(dev, slave, qpn, RES_QP);
3591 qp_detach(dev, &qp, gid, prot, type, reg_id);
3593 put_res(dev, slave, qpn, RES_QP);
3598 * MAC validation for Flow Steering rules.
3599 * A VF can attach rules only with a MAC address that is assigned to it.
3601 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3602 struct list_head *rlist)
3604 struct mac_res *res, *tmp;
3607 /* make sure it isn't a multicast or broadcast MAC */
3608 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3609 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3610 list_for_each_entry_safe(res, tmp, rlist, list) {
3611 be_mac = cpu_to_be64(res->mac << 16);
3612 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3615 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3616 eth_header->eth.dst_mac, slave);
3623 * If the eth header is missing, insert an eth header carrying a MAC
3624 * address assigned to the VF.
3626 static int add_eth_header(struct mlx4_dev *dev, int slave,
3627 struct mlx4_cmd_mailbox *inbox,
3628 struct list_head *rlist, int header_id)
3630 struct mac_res *res, *tmp;
3632 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3633 struct mlx4_net_trans_rule_hw_eth *eth_header;
3634 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3635 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3637 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3639 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3641 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3643 /* Make room in the inbox for the eth header */
3644 switch (header_id) {
3645 case MLX4_NET_TRANS_RULE_ID_IPV4:
3647 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3648 memmove(ip_header, eth_header,
3649 sizeof(*ip_header) + sizeof(*l4_header));
3651 case MLX4_NET_TRANS_RULE_ID_TCP:
3652 case MLX4_NET_TRANS_RULE_ID_UDP:
3653 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3655 memmove(l4_header, eth_header, sizeof(*l4_header));
3660 list_for_each_entry_safe(res, tmp, rlist, list) {
3661 if (port == res->port) {
3662 be_mac = cpu_to_be64(res->mac << 16);
3667 pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
3672 memset(eth_header, 0, sizeof(*eth_header));
3673 eth_header->size = sizeof(*eth_header) >> 2;
3674 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3675 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3676 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
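/*
 * Flow-steering attach on behalf of a slave: validate that the rule's QP
 * belongs to the slave and that any L2 header uses one of the slave's MACs
 * (inserting a proper eth header when it is missing), execute the attach,
 * and register the returned rule id in the resource tracker so the rule
 * can be reclaimed when the slave goes down.
 */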
3682 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3683 struct mlx4_vhcr *vhcr,
3684 struct mlx4_cmd_mailbox *inbox,
3685 struct mlx4_cmd_mailbox *outbox,
3686 struct mlx4_cmd_info *cmd)
3689 struct mlx4_priv *priv = mlx4_priv(dev);
3690 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3691 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3695 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3696 struct _rule_hw *rule_header;
3699 if (dev->caps.steering_mode !=
3700 MLX4_STEERING_MODE_DEVICE_MANAGED)
3703 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3704 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3705 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3707 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3710 rule_header = (struct _rule_hw *)(ctrl + 1);
3711 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3713 switch (header_id) {
3714 case MLX4_NET_TRANS_RULE_ID_ETH:
3715 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3720 case MLX4_NET_TRANS_RULE_ID_IB:
3722 case MLX4_NET_TRANS_RULE_ID_IPV4:
3723 case MLX4_NET_TRANS_RULE_ID_TCP:
3724 case MLX4_NET_TRANS_RULE_ID_UDP:
3725 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3726 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3730 vhcr->in_modifier +=
3731 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3734 pr_err("Corrupted mailbox.\n");
3739 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3740 vhcr->in_modifier, 0,
3741 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3746 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3748 mlx4_err(dev, "Failed to add flow steering resources\n");
3750 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3751 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3755 atomic_inc(&rqp->ref_count);
3757 put_res(dev, slave, qpn, RES_QP);
3761 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3762 struct mlx4_vhcr *vhcr,
3763 struct mlx4_cmd_mailbox *inbox,
3764 struct mlx4_cmd_mailbox *outbox,
3765 struct mlx4_cmd_info *cmd)
3769 struct res_fs_rule *rrule;
3771 if (dev->caps.steering_mode !=
3772 MLX4_STEERING_MODE_DEVICE_MANAGED)
3775 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3778 /* Release the rule from busy state before removal */
3779 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3780 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3784 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3786 mlx4_err(dev, "Failed to remove flow steering resources\n");
3790 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3791 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3794 atomic_dec(&rqp->ref_count);
3796 put_res(dev, slave, rrule->qpn, RES_QP);
3801 BUSY_MAX_RETRIES = 10
3804 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3805 struct mlx4_vhcr *vhcr,
3806 struct mlx4_cmd_mailbox *inbox,
3807 struct mlx4_cmd_mailbox *outbox,
3808 struct mlx4_cmd_info *cmd)
3811 int index = vhcr->in_modifier & 0xffff;
3813 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3817 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3818 put_res(dev, slave, index, RES_COUNTER);
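/*
 * Drop every multicast attachment still recorded on the QP's mcg_list,
 * using the detach primitive that matches the device's steering mode.
 */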
3822 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3824 struct res_gid *rgid;
3825 struct res_gid *tmp;
3826 struct mlx4_qp qp; /* dummy for calling attach/detach */
3828 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3829 switch (dev->caps.steering_mode) {
3830 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3831 mlx4_flow_detach(dev, rgid->reg_id);
3833 case MLX4_STEERING_MODE_B0:
3834 qp.qpn = rqp->local_qpn;
3835 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3836 rgid->prot, rgid->steer);
3839 list_del(&rgid->list);
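/*
 * Claim all of a slave's resources of a given type by moving them to
 * RES_ANY_BUSY. A resource that is already busy is in use by an in-flight
 * command wrapper, so the caller must retry; the 'print' pass logs which
 * ids are still busy.
 */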
3844 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3845 enum mlx4_resource type, int print)
3847 struct mlx4_priv *priv = mlx4_priv(dev);
3848 struct mlx4_resource_tracker *tracker =
3849 &priv->mfunc.master.res_tracker;
3850 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3851 struct res_common *r;
3852 struct res_common *tmp;
3856 spin_lock_irq(mlx4_tlock(dev));
3857 list_for_each_entry_safe(r, tmp, rlist, list) {
3858 if (r->owner == slave) {
3860 if (r->state == RES_ANY_BUSY) {
3863 "%s id 0x%llx is busy\n",
3868 r->from_state = r->state;
3869 r->state = RES_ANY_BUSY;
3875 spin_unlock_irq(mlx4_tlock(dev));
3880 static int move_all_busy(struct mlx4_dev *dev, int slave,
3881 enum mlx4_resource type)
3883 unsigned long begin;
3888 busy = _move_all_busy(dev, slave, type, 0);
3889 if (time_after(jiffies, begin + 5 * HZ))
3896 busy = _move_all_busy(dev, slave, type, 1);
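/*
 * The rem_slave_*() helpers below all follow the same pattern: claim the
 * slave's resources via move_all_busy(), then walk each resource's state
 * machine downwards (e.g. RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED ->
 * freed), undoing at every step what the slave did on the way up, until
 * the tracker entry can be erased.
 */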
3900 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3902 struct mlx4_priv *priv = mlx4_priv(dev);
3903 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3904 struct list_head *qp_list =
3905 &tracker->slave_list[slave].res_list[RES_QP];
3913 err = move_all_busy(dev, slave, RES_QP);
3915 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
3916 "for slave %d\n", slave);
3918 spin_lock_irq(mlx4_tlock(dev));
3919 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3920 spin_unlock_irq(mlx4_tlock(dev));
3921 if (qp->com.owner == slave) {
3922 qpn = qp->com.res_id;
3923 detach_qp(dev, slave, qp);
3924 state = qp->com.from_state;
3925 while (state != 0) {
3927 case RES_QP_RESERVED:
3928 spin_lock_irq(mlx4_tlock(dev));
3929 rb_erase(&qp->com.node,
3930 &tracker->res_tree[RES_QP]);
3931 list_del(&qp->com.list);
3932 spin_unlock_irq(mlx4_tlock(dev));
3933 if (!valid_reserved(dev, slave, qpn)) {
3934 __mlx4_qp_release_range(dev, qpn, 1);
3935 mlx4_release_resource(dev, slave,
3942 if (!valid_reserved(dev, slave, qpn))
3943 __mlx4_qp_free_icm(dev, qpn);
3944 state = RES_QP_RESERVED;
3948 err = mlx4_cmd(dev, in_param,
3951 MLX4_CMD_TIME_CLASS_A,
3954 mlx4_dbg(dev, "rem_slave_qps: failed"
3955 " to move slave %d qpn %d to"
3958 atomic_dec(&qp->rcq->ref_count);
3959 atomic_dec(&qp->scq->ref_count);
3960 atomic_dec(&qp->mtt->ref_count);
3962 atomic_dec(&qp->srq->ref_count);
3963 state = RES_QP_MAPPED;
3970 spin_lock_irq(mlx4_tlock(dev));
3972 spin_unlock_irq(mlx4_tlock(dev));
3975 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3977 struct mlx4_priv *priv = mlx4_priv(dev);
3978 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3979 struct list_head *srq_list =
3980 &tracker->slave_list[slave].res_list[RES_SRQ];
3981 struct res_srq *srq;
3982 struct res_srq *tmp;
3989 err = move_all_busy(dev, slave, RES_SRQ);
3991 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3992 "busy for slave %d\n", slave);
3994 spin_lock_irq(mlx4_tlock(dev));
3995 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3996 spin_unlock_irq(mlx4_tlock(dev));
3997 if (srq->com.owner == slave) {
3998 srqn = srq->com.res_id;
3999 state = srq->com.from_state;
4000 while (state != 0) {
4002 case RES_SRQ_ALLOCATED:
4003 __mlx4_srq_free_icm(dev, srqn);
4004 spin_lock_irq(mlx4_tlock(dev));
4005 rb_erase(&srq->com.node,
4006 &tracker->res_tree[RES_SRQ]);
4007 list_del(&srq->com.list);
4008 spin_unlock_irq(mlx4_tlock(dev));
4009 mlx4_release_resource(dev, slave,
4017 err = mlx4_cmd(dev, in_param, srqn, 1,
4019 MLX4_CMD_TIME_CLASS_A,
4022 mlx4_dbg(dev, "rem_slave_srqs: failed"
4023 " to move slave %d srq %d to"
4027 atomic_dec(&srq->mtt->ref_count);
4029 atomic_dec(&srq->cq->ref_count);
4030 state = RES_SRQ_ALLOCATED;
4038 spin_lock_irq(mlx4_tlock(dev));
4040 spin_unlock_irq(mlx4_tlock(dev));
4043 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4045 struct mlx4_priv *priv = mlx4_priv(dev);
4046 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4047 struct list_head *cq_list =
4048 &tracker->slave_list[slave].res_list[RES_CQ];
4057 err = move_all_busy(dev, slave, RES_CQ);
4059 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
4060 "busy for slave %d\n", slave);
4062 spin_lock_irq(mlx4_tlock(dev));
4063 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4064 spin_unlock_irq(mlx4_tlock(dev));
4065 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4066 cqn = cq->com.res_id;
4067 state = cq->com.from_state;
4068 while (state != 0) {
4070 case RES_CQ_ALLOCATED:
4071 __mlx4_cq_free_icm(dev, cqn);
4072 spin_lock_irq(mlx4_tlock(dev));
4073 rb_erase(&cq->com.node,
4074 &tracker->res_tree[RES_CQ]);
4075 list_del(&cq->com.list);
4076 spin_unlock_irq(mlx4_tlock(dev));
4077 mlx4_release_resource(dev, slave,
4085 err = mlx4_cmd(dev, in_param, cqn, 1,
4087 MLX4_CMD_TIME_CLASS_A,
4090 mlx4_dbg(dev, "rem_slave_cqs: failed"
4091 " to move slave %d cq %d to"
4094 atomic_dec(&cq->mtt->ref_count);
4095 state = RES_CQ_ALLOCATED;
4103 spin_lock_irq(mlx4_tlock(dev));
4105 spin_unlock_irq(mlx4_tlock(dev));
4108 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4110 struct mlx4_priv *priv = mlx4_priv(dev);
4111 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4112 struct list_head *mpt_list =
4113 &tracker->slave_list[slave].res_list[RES_MPT];
4114 struct res_mpt *mpt;
4115 struct res_mpt *tmp;
4122 err = move_all_busy(dev, slave, RES_MPT);
4124 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
4125 "busy for slave %d\n", slave);
4127 spin_lock_irq(mlx4_tlock(dev));
4128 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4129 spin_unlock_irq(mlx4_tlock(dev));
4130 if (mpt->com.owner == slave) {
4131 mptn = mpt->com.res_id;
4132 state = mpt->com.from_state;
4133 while (state != 0) {
4135 case RES_MPT_RESERVED:
4136 __mlx4_mpt_release(dev, mpt->key);
4137 spin_lock_irq(mlx4_tlock(dev));
4138 rb_erase(&mpt->com.node,
4139 &tracker->res_tree[RES_MPT]);
4140 list_del(&mpt->com.list);
4141 spin_unlock_irq(mlx4_tlock(dev));
4142 mlx4_release_resource(dev, slave,
4148 case RES_MPT_MAPPED:
4149 __mlx4_mpt_free_icm(dev, mpt->key);
4150 state = RES_MPT_RESERVED;
4155 err = mlx4_cmd(dev, in_param, mptn, 0,
4157 MLX4_CMD_TIME_CLASS_A,
4160 mlx4_dbg(dev, "rem_slave_mrs: failed"
4161 " to move slave %d mpt %d to"
4165 atomic_dec(&mpt->mtt->ref_count);
4166 state = RES_MPT_MAPPED;
4173 spin_lock_irq(mlx4_tlock(dev));
4175 spin_unlock_irq(mlx4_tlock(dev));
4178 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4180 struct mlx4_priv *priv = mlx4_priv(dev);
4181 struct mlx4_resource_tracker *tracker =
4182 &priv->mfunc.master.res_tracker;
4183 struct list_head *mtt_list =
4184 &tracker->slave_list[slave].res_list[RES_MTT];
4185 struct res_mtt *mtt;
4186 struct res_mtt *tmp;
4192 err = move_all_busy(dev, slave, RES_MTT);
4194 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
4195 "busy for slave %d\n", slave);
4197 spin_lock_irq(mlx4_tlock(dev));
4198 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4199 spin_unlock_irq(mlx4_tlock(dev));
4200 if (mtt->com.owner == slave) {
4201 base = mtt->com.res_id;
4202 state = mtt->com.from_state;
4203 while (state != 0) {
4205 case RES_MTT_ALLOCATED:
4206 __mlx4_free_mtt_range(dev, base,
4208 spin_lock_irq(mlx4_tlock(dev));
4209 rb_erase(&mtt->com.node,
4210 &tracker->res_tree[RES_MTT]);
4211 list_del(&mtt->com.list);
4212 spin_unlock_irq(mlx4_tlock(dev));
4213 mlx4_release_resource(dev, slave, RES_MTT,
4214 1 << mtt->order, 0);
4224 spin_lock_irq(mlx4_tlock(dev));
4226 spin_unlock_irq(mlx4_tlock(dev));
4229 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4231 struct mlx4_priv *priv = mlx4_priv(dev);
4232 struct mlx4_resource_tracker *tracker =
4233 &priv->mfunc.master.res_tracker;
4234 struct list_head *fs_rule_list =
4235 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4236 struct res_fs_rule *fs_rule;
4237 struct res_fs_rule *tmp;
4242 err = move_all_busy(dev, slave, RES_FS_RULE);
4244 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4247 spin_lock_irq(mlx4_tlock(dev));
4248 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4249 spin_unlock_irq(mlx4_tlock(dev));
4250 if (fs_rule->com.owner == slave) {
4251 base = fs_rule->com.res_id;
4252 state = fs_rule->com.from_state;
4253 while (state != 0) {
4255 case RES_FS_RULE_ALLOCATED:
4257 err = mlx4_cmd(dev, base, 0, 0,
4258 MLX4_QP_FLOW_STEERING_DETACH,
4259 MLX4_CMD_TIME_CLASS_A,
4262 spin_lock_irq(mlx4_tlock(dev));
4263 rb_erase(&fs_rule->com.node,
4264 &tracker->res_tree[RES_FS_RULE]);
4265 list_del(&fs_rule->com.list);
4266 spin_unlock_irq(mlx4_tlock(dev));
4276 spin_lock_irq(mlx4_tlock(dev));
4278 spin_unlock_irq(mlx4_tlock(dev));
4281 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4283 struct mlx4_priv *priv = mlx4_priv(dev);
4284 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4285 struct list_head *eq_list =
4286 &tracker->slave_list[slave].res_list[RES_EQ];
4293 struct mlx4_cmd_mailbox *mailbox;
4295 err = move_all_busy(dev, slave, RES_EQ);
4297 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
4298 "busy for slave %d\n", slave);
4300 spin_lock_irq(mlx4_tlock(dev));
4301 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4302 spin_unlock_irq(mlx4_tlock(dev));
4303 if (eq->com.owner == slave) {
4304 eqn = eq->com.res_id;
4305 state = eq->com.from_state;
4306 while (state != 0) {
4308 case RES_EQ_RESERVED:
4309 spin_lock_irq(mlx4_tlock(dev));
4310 rb_erase(&eq->com.node,
4311 &tracker->res_tree[RES_EQ]);
4312 list_del(&eq->com.list);
4313 spin_unlock_irq(mlx4_tlock(dev));
4319 mailbox = mlx4_alloc_cmd_mailbox(dev);
4320 if (IS_ERR(mailbox)) {
4324 err = mlx4_cmd_box(dev, slave, 0,
4327 MLX4_CMD_TIME_CLASS_A,
4330 mlx4_dbg(dev, "rem_slave_eqs: failed"
4331 " to move slave %d eq %d to"
4332 " SW ownership\n", slave, eqn);
4333 mlx4_free_cmd_mailbox(dev, mailbox);
4334 atomic_dec(&eq->mtt->ref_count);
4335 state = RES_EQ_RESERVED;
4343 spin_lock_irq(mlx4_tlock(dev));
4345 spin_unlock_irq(mlx4_tlock(dev));
4348 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4350 struct mlx4_priv *priv = mlx4_priv(dev);
4351 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4352 struct list_head *counter_list =
4353 &tracker->slave_list[slave].res_list[RES_COUNTER];
4354 struct res_counter *counter;
4355 struct res_counter *tmp;
4359 err = move_all_busy(dev, slave, RES_COUNTER);
4361 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
4362 "busy for slave %d\n", slave);
4364 spin_lock_irq(mlx4_tlock(dev));
4365 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4366 if (counter->com.owner == slave) {
4367 index = counter->com.res_id;
4368 rb_erase(&counter->com.node,
4369 &tracker->res_tree[RES_COUNTER]);
4370 list_del(&counter->com.list);
4372 __mlx4_counter_free(dev, index);
4373 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4376 spin_unlock_irq(mlx4_tlock(dev));
4379 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4381 struct mlx4_priv *priv = mlx4_priv(dev);
4382 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4383 struct list_head *xrcdn_list =
4384 &tracker->slave_list[slave].res_list[RES_XRCD];
4385 struct res_xrcdn *xrcd;
4386 struct res_xrcdn *tmp;
4390 err = move_all_busy(dev, slave, RES_XRCD);
4392 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
4393 "busy for slave %d\n", slave);
4395 spin_lock_irq(mlx4_tlock(dev));
4396 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4397 if (xrcd->com.owner == slave) {
4398 xrcdn = xrcd->com.res_id;
4399 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4400 list_del(&xrcd->com.list);
4402 __mlx4_xrcd_free(dev, xrcdn);
4405 spin_unlock_irq(mlx4_tlock(dev));
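/*
 * Full cleanup for a slave, called when it goes down or resets. The
 * rem_slave_*() calls are ordered so that a resource is released only
 * after the resources that reference it: flow rules and QPs go first,
 * then the SRQs/CQs/MRs whose refcounts the QPs held, and finally EQs,
 * MTTs, counters and XRC domains.
 */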
4408 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4410 struct mlx4_priv *priv = mlx4_priv(dev);
4412 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4413 rem_slave_vlans(dev, slave);
4414 rem_slave_macs(dev, slave);
4415 rem_slave_fs_rule(dev, slave);
4416 rem_slave_qps(dev, slave);
4417 rem_slave_srqs(dev, slave);
4418 rem_slave_cqs(dev, slave);
4419 rem_slave_mrs(dev, slave);
4420 rem_slave_eqs(dev, slave);
4421 rem_slave_mtts(dev, slave);
4422 rem_slave_counters(dev, slave);
4423 rem_slave_xrcdns(dev, slave);
4424 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
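/*
 * Deferred work that pushes an immediate VLAN/QoS change (VST) to all of a
 * VF's active QPs: each QP in RES_QP_HW state on the affected port gets an
 * UPDATE_QP command rewriting its vlan_control, VLAN index and sched_queue
 * (QoS) fields.
 */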
4427 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4429 struct mlx4_vf_immed_vlan_work *work =
4430 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4431 struct mlx4_cmd_mailbox *mailbox;
4432 struct mlx4_update_qp_context *upd_context;
4433 struct mlx4_dev *dev = &work->priv->dev;
4434 struct mlx4_resource_tracker *tracker =
4435 &work->priv->mfunc.master.res_tracker;
4436 struct list_head *qp_list =
4437 &tracker->slave_list[work->slave].res_list[RES_QP];
4440 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4441 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4442 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4443 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4444 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4445 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4446 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4447 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4450 int port, errors = 0;
4453 if (mlx4_is_slave(dev)) {
4454 mlx4_warn(dev, "Trying to update QP in slave %d\n",
4459 mailbox = mlx4_alloc_cmd_mailbox(dev);
4460 if (IS_ERR(mailbox))
4462 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4463 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4464 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4465 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4466 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4467 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4468 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4469 else if (!work->vlan_id)
4470 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4471 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4473 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4474 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4475 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4477 upd_context = mailbox->buf;
4478 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4479 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4480 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4482 spin_lock_irq(mlx4_tlock(dev));
4483 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4484 spin_unlock_irq(mlx4_tlock(dev));
4485 if (qp->com.owner == work->slave) {
4486 if (qp->com.from_state != RES_QP_HW ||
4487 !qp->sched_queue || /* no INIT2RTR trans yet */
4488 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4489 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4490 spin_lock_irq(mlx4_tlock(dev));
4493 port = (qp->sched_queue >> 6 & 1) + 1;
4494 if (port != work->port) {
4495 spin_lock_irq(mlx4_tlock(dev));
4498 upd_context->qp_context.pri_path.sched_queue =
4499 qp->sched_queue & 0xC7;
4500 upd_context->qp_context.pri_path.sched_queue |=
4501 ((work->qos & 0x7) << 3);
4503 err = mlx4_cmd(dev, mailbox->dma,
4504 qp->local_qpn & 0xffffff,
4505 0, MLX4_CMD_UPDATE_QP,
4506 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4508 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4509 "port %d, qpn %d (%d)\n",
4510 work->slave, port, qp->local_qpn,
4515 spin_lock_irq(mlx4_tlock(dev));
4517 spin_unlock_irq(mlx4_tlock(dev));
4518 mlx4_free_cmd_mailbox(dev, mailbox);
4521 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4522 errors, work->slave, work->port);
4524 /* Unregister the previous vlan_id if needed, provided there were
4525 * no errors while updating the QPs
4527 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4528 NO_INDX != work->orig_vlan_ix)
4529 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4530 work->orig_vlan_id);