2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
49 #include "mlx4_stats.h"
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
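/*
 * Counter budget: the PF is guaranteed MLX4_PF_COUNTERS_PER_PORT
 * counters per port and each VF MLX4_VF_COUNTERS_PER_PORT; these
 * figures feed the RES_COUNTER quota/guarantee setup in
 * mlx4_init_resource_tracker() below.
 */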
56 struct list_head list;
64 struct list_head list;
72 struct list_head list;
80 const char *func_name;
88 struct list_head list;
90 enum mlx4_protocol prot;
91 enum mlx4_steer_type steer;
96 RES_QP_BUSY = RES_ANY_BUSY,
98 /* QP number was allocated */
101 /* ICM memory for QP context was mapped */
104 /* QP is in hw ownership */
109 struct res_common com;
114 struct list_head mcg_list;
119 /* saved qp params before VST enforcement in order to restore on VGT */
129 enum res_mtt_states {
130 RES_MTT_BUSY = RES_ANY_BUSY,
134 static inline const char *mtt_states_str(enum res_mtt_states state)
137 case RES_MTT_BUSY: return "RES_MTT_BUSY";
138 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
139 default: return "Unknown";
144 struct res_common com;
149 enum res_mpt_states {
150 RES_MPT_BUSY = RES_ANY_BUSY,
157 struct res_common com;
163 RES_EQ_BUSY = RES_ANY_BUSY,
169 struct res_common com;
174 RES_CQ_BUSY = RES_ANY_BUSY,
180 struct res_common com;
185 enum res_srq_states {
186 RES_SRQ_BUSY = RES_ANY_BUSY,
192 struct res_common com;
198 enum res_counter_states {
199 RES_COUNTER_BUSY = RES_ANY_BUSY,
200 RES_COUNTER_ALLOCATED,
204 struct res_common com;
208 enum res_xrcdn_states {
209 RES_XRCD_BUSY = RES_ANY_BUSY,
214 struct res_common com;
218 enum res_fs_rule_states {
219 RES_FS_RULE_BUSY = RES_ANY_BUSY,
220 RES_FS_RULE_ALLOCATED,
224 struct res_common com;
226 /* VF DMFS mbox with port flipped */
228 /* > 0 --> apply mirror when getting into HA mode */
229 /* = 0 --> un-apply mirror when getting out of HA mode */
231 struct list_head mirr_list;
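/*
 * Each tracked resource lives in two structures at once: a per-type
 * red-black tree keyed by res_id (fast lookup by id) and a per-slave
 * list (cheap teardown of everything one function owns).  The two
 * helpers below are the standard rb-tree walk over struct res_common.
 */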
235 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
237 struct rb_node *node = root->rb_node;
240 struct res_common *res = rb_entry(node, struct res_common,
243 if (res_id < res->res_id)
244 node = node->rb_left;
245 else if (res_id > res->res_id)
246 node = node->rb_right;
253 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
255 struct rb_node **new = &(root->rb_node), *parent = NULL;
257 /* Figure out where to put new node */
259 struct res_common *this = rb_entry(*new, struct res_common,
263 if (res->res_id < this->res_id)
264 new = &((*new)->rb_left);
265 else if (res->res_id > this->res_id)
266 new = &((*new)->rb_right);
271 /* Add new node and rebalance tree. */
272 rb_link_node(&res->node, parent, new);
273 rb_insert_color(&res->node, root);
288 static const char *resource_str(enum mlx4_resource rt)
291 case RES_QP: return "RES_QP";
292 case RES_CQ: return "RES_CQ";
293 case RES_SRQ: return "RES_SRQ";
294 case RES_MPT: return "RES_MPT";
295 case RES_MTT: return "RES_MTT";
296 case RES_MAC: return "RES_MAC";
297 case RES_VLAN: return "RES_VLAN";
298 case RES_EQ: return "RES_EQ";
299 case RES_COUNTER: return "RES_COUNTER";
300 case RES_FS_RULE: return "RES_FS_RULE";
301 case RES_XRCD: return "RES_XRCD";
302 default: return "Unknown resource type !!!";
306 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
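/*
 * Quota model, roughly: every function has a hard cap (quota[slave])
 * and a guaranteed share (guaranteed[slave]).  A grant is served from
 * the caller's unused guaranteed portion first; the remainder may only
 * be taken from the shared free pool if doing so does not dip into the
 * reservations still owed to other functions.  mlx4_release_resource()
 * reverses the same accounting.  Both run under alloc_lock.
 */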
307 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
308 enum mlx4_resource res_type, int count,
311 struct mlx4_priv *priv = mlx4_priv(dev);
312 struct resource_allocator *res_alloc =
313 &priv->mfunc.master.res_tracker.res_alloc[res_type];
315 int allocated, free, reserved, guaranteed, from_free;
318 if (slave > dev->persist->num_vfs)
321 spin_lock(&res_alloc->alloc_lock);
322 allocated = (port > 0) ?
323 res_alloc->allocated[(port - 1) *
324 (dev->persist->num_vfs + 1) + slave] :
325 res_alloc->allocated[slave];
326 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
328 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
329 res_alloc->res_reserved;
330 guaranteed = res_alloc->guaranteed[slave];
332 if (allocated + count > res_alloc->quota[slave]) {
333 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
334 slave, port, resource_str(res_type), count,
335 allocated, res_alloc->quota[slave]);
339 if (allocated + count <= guaranteed) {
343 /* portion may need to be obtained from free area */
344 if (guaranteed - allocated > 0)
345 from_free = count - (guaranteed - allocated);
349 from_rsvd = count - from_free;
351 if (free - from_free >= reserved)
354 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
355 slave, port, resource_str(res_type), free,
356 from_free, reserved);
360 /* grant the request */
362 res_alloc->allocated[(port - 1) *
363 (dev->persist->num_vfs + 1) + slave] += count;
364 res_alloc->res_port_free[port - 1] -= count;
365 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
367 res_alloc->allocated[slave] += count;
368 res_alloc->res_free -= count;
369 res_alloc->res_reserved -= from_rsvd;
374 spin_unlock(&res_alloc->alloc_lock);
378 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
379 enum mlx4_resource res_type, int count,
382 struct mlx4_priv *priv = mlx4_priv(dev);
383 struct resource_allocator *res_alloc =
384 &priv->mfunc.master.res_tracker.res_alloc[res_type];
385 int allocated, guaranteed, from_rsvd;
387 if (slave > dev->persist->num_vfs)
390 spin_lock(&res_alloc->alloc_lock);
392 allocated = (port > 0) ?
393 res_alloc->allocated[(port - 1) *
394 (dev->persist->num_vfs + 1) + slave] :
395 res_alloc->allocated[slave];
396 guaranteed = res_alloc->guaranteed[slave];
398 if (allocated - count >= guaranteed) {
401 /* portion may need to be returned to reserved area */
402 if (allocated - guaranteed > 0)
403 from_rsvd = count - (allocated - guaranteed);
409 res_alloc->allocated[(port - 1) *
410 (dev->persist->num_vfs + 1) + slave] -= count;
411 res_alloc->res_port_free[port - 1] += count;
412 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
414 res_alloc->allocated[slave] -= count;
415 res_alloc->res_free += count;
416 res_alloc->res_reserved += from_rsvd;
419 spin_unlock(&res_alloc->alloc_lock);
423 static inline void initialize_res_quotas(struct mlx4_dev *dev,
424 struct resource_allocator *res_alloc,
425 enum mlx4_resource res_type,
426 int vf, int num_instances)
428 res_alloc->guaranteed[vf] = num_instances /
429 (2 * (dev->persist->num_vfs + 1));
430 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
431 if (vf == mlx4_master_func_num(dev)) {
432 res_alloc->res_free = num_instances;
433 if (res_type == RES_MTT) {
434 /* reserved mtts will be taken out of the PF allocation */
435 res_alloc->res_free += dev->caps.reserved_mtts;
436 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
437 res_alloc->quota[vf] += dev->caps.reserved_mtts;
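/*
 * Illustrative arithmetic for initialize_res_quotas() above (numbers
 * invented): with num_instances = 1000 and num_vfs = 3, each function
 * gets guaranteed = 1000 / (2 * 4) = 125 and quota = 500 + 125 = 625;
 * for RES_MTT the PF additionally folds dev->caps.reserved_mtts into
 * its own free, guaranteed and quota figures.
 */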
442 void mlx4_init_quotas(struct mlx4_dev *dev)
444 struct mlx4_priv *priv = mlx4_priv(dev);
447 /* quotas for VFs are initialized in mlx4_slave_cap */
448 if (mlx4_is_slave(dev))
451 if (!mlx4_is_mfunc(dev)) {
452 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
453 mlx4_num_reserved_sqps(dev);
454 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
455 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
456 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
457 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
461 pf = mlx4_master_func_num(dev);
463 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
465 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
467 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
469 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
471 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
474 static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
476 /* exclude the sink counter */
477 return (dev->caps.max_counters - 1 -
478 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
482 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
484 struct mlx4_priv *priv = mlx4_priv(dev);
487 int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);
489 priv->mfunc.master.res_tracker.slave_list =
490 kcalloc(dev->num_slaves, sizeof(struct slave_list),
492 if (!priv->mfunc.master.res_tracker.slave_list)
495 for (i = 0 ; i < dev->num_slaves; i++) {
496 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
497 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
498 slave_list[i].res_list[t]);
499 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
502 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
504 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
505 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
507 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
508 struct resource_allocator *res_alloc =
509 &priv->mfunc.master.res_tracker.res_alloc[i];
510 res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
513 res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
516 if (i == RES_MAC || i == RES_VLAN)
517 res_alloc->allocated =
518 kcalloc(MLX4_MAX_PORTS *
519 (dev->persist->num_vfs + 1),
520 sizeof(int), GFP_KERNEL);
522 res_alloc->allocated =
523 kcalloc(dev->persist->num_vfs + 1,
524 sizeof(int), GFP_KERNEL);
525 /* Exclude the sink counter */
526 if (i == RES_COUNTER)
527 res_alloc->res_free = dev->caps.max_counters - 1;
529 if (!res_alloc->quota || !res_alloc->guaranteed ||
530 !res_alloc->allocated)
533 spin_lock_init(&res_alloc->alloc_lock);
534 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
535 struct mlx4_active_ports actv_ports =
536 mlx4_get_active_ports(dev, t);
539 initialize_res_quotas(dev, res_alloc, RES_QP,
540 t, dev->caps.num_qps -
541 dev->caps.reserved_qps -
542 mlx4_num_reserved_sqps(dev));
545 initialize_res_quotas(dev, res_alloc, RES_CQ,
546 t, dev->caps.num_cqs -
547 dev->caps.reserved_cqs);
550 initialize_res_quotas(dev, res_alloc, RES_SRQ,
551 t, dev->caps.num_srqs -
552 dev->caps.reserved_srqs);
555 initialize_res_quotas(dev, res_alloc, RES_MPT,
556 t, dev->caps.num_mpts -
557 dev->caps.reserved_mrws);
560 initialize_res_quotas(dev, res_alloc, RES_MTT,
561 t, dev->caps.num_mtts -
562 dev->caps.reserved_mtts);
565 if (t == mlx4_master_func_num(dev)) {
566 int max_vfs_pport = 0;
567 /* Calculate the max number of VFs per port */
569 for (j = 0; j < dev->caps.num_ports;
571 struct mlx4_slaves_pport slaves_pport =
572 mlx4_phys_to_slaves_pport(dev, j + 1);
573 unsigned current_slaves =
574 bitmap_weight(slaves_pport.slaves,
575 dev->caps.num_ports) - 1;
576 if (max_vfs_pport < current_slaves)
580 res_alloc->quota[t] =
583 res_alloc->guaranteed[t] = 2;
584 for (j = 0; j < MLX4_MAX_PORTS; j++)
585 res_alloc->res_port_free[j] =
588 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
589 res_alloc->guaranteed[t] = 2;
593 if (t == mlx4_master_func_num(dev)) {
594 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
595 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
596 for (j = 0; j < MLX4_MAX_PORTS; j++)
597 res_alloc->res_port_free[j] =
600 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
601 res_alloc->guaranteed[t] = 0;
605 res_alloc->quota[t] = dev->caps.max_counters;
606 if (t == mlx4_master_func_num(dev))
607 res_alloc->guaranteed[t] =
608 MLX4_PF_COUNTERS_PER_PORT *
610 else if (t <= max_vfs_guarantee_counter)
611 res_alloc->guaranteed[t] =
612 MLX4_VF_COUNTERS_PER_PORT *
615 res_alloc->guaranteed[t] = 0;
620 if (i == RES_MAC || i == RES_VLAN) {
621 for (j = 0; j < dev->caps.num_ports; j++)
622 if (test_bit(j, actv_ports.ports))
623 res_alloc->res_port_rsvd[j] +=
624 res_alloc->guaranteed[t];
626 res_alloc->res_reserved += res_alloc->guaranteed[t];
630 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
634 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
635 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
636 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
637 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
638 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
639 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
640 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
645 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
646 enum mlx4_res_tracker_free_type type)
648 struct mlx4_priv *priv = mlx4_priv(dev);
651 if (priv->mfunc.master.res_tracker.slave_list) {
652 if (type != RES_TR_FREE_STRUCTS_ONLY) {
653 for (i = 0; i < dev->num_slaves; i++) {
654 if (type == RES_TR_FREE_ALL ||
655 dev->caps.function != i)
656 mlx4_delete_all_resources_for_slave(dev, i);
658 /* free master's vlans */
659 i = dev->caps.function;
660 mlx4_reset_roce_gids(dev, i);
661 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
662 rem_slave_vlans(dev, i);
663 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
666 if (type != RES_TR_FREE_SLAVES_ONLY) {
667 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
668 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
669 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
670 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
671 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
672 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
673 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
675 kfree(priv->mfunc.master.res_tracker.slave_list);
676 priv->mfunc.master.res_tracker.slave_list = NULL;
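/*
 * The helpers from here on rewrite a VF's command mailbox before it is
 * forwarded to firmware: update_pkey_index() replaces the virtual pkey
 * index with the physical one, update_gid() rebases the GID/MGID index
 * onto the slave's GID range, and update_vport_qp_param() enforces the
 * VST vlan and spoof-check policy configured by the PF.
 */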
681 static void update_pkey_index(struct mlx4_dev *dev, int slave,
682 struct mlx4_cmd_mailbox *inbox)
684 u8 sched = *(u8 *)(inbox->buf + 64);
685 u8 orig_index = *(u8 *)(inbox->buf + 35);
687 struct mlx4_priv *priv = mlx4_priv(dev);
690 port = (sched >> 6 & 1) + 1;
692 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
693 *(u8 *)(inbox->buf + 35) = new_index;
696 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
699 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
700 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
701 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
704 if (MLX4_QP_ST_UD == ts) {
705 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
706 if (mlx4_is_eth(dev, port))
707 qp_ctx->pri_path.mgid_index =
708 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
710 qp_ctx->pri_path.mgid_index = slave | 0x80;
712 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
713 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
714 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
715 if (mlx4_is_eth(dev, port)) {
716 qp_ctx->pri_path.mgid_index +=
717 mlx4_get_base_gid_ix(dev, slave, port);
718 qp_ctx->pri_path.mgid_index &= 0x7f;
720 qp_ctx->pri_path.mgid_index = slave & 0x7F;
723 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
724 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
725 if (mlx4_is_eth(dev, port)) {
726 qp_ctx->alt_path.mgid_index +=
727 mlx4_get_base_gid_ix(dev, slave, port);
728 qp_ctx->alt_path.mgid_index &= 0x7f;
730 qp_ctx->alt_path.mgid_index = slave & 0x7F;
736 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
739 static int update_vport_qp_param(struct mlx4_dev *dev,
740 struct mlx4_cmd_mailbox *inbox,
743 struct mlx4_qp_context *qpc = inbox->buf + 8;
744 struct mlx4_vport_oper_state *vp_oper;
745 struct mlx4_priv *priv;
749 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
750 priv = mlx4_priv(dev);
751 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
752 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
754 err = handle_counter(dev, qpc, slave, port);
758 if (MLX4_VGT != vp_oper->state.default_vlan) {
759 /* the reserved QPs (special, proxy, tunnel)
760 * do not operate over vlans
762 if (mlx4_is_qp_reserved(dev, qpn))
765 /* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */
766 if (qp_type == MLX4_QP_ST_UD ||
767 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
768 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
769 *(__be32 *)inbox->buf =
770 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
771 MLX4_QP_OPTPAR_VLAN_STRIPPING);
772 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
774 struct mlx4_update_qp_params params = {.flags = 0};
776 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
782 /* preserve IF_COUNTER flag */
783 qpc->pri_path.vlan_control &=
784 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
785 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
786 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
787 qpc->pri_path.vlan_control |=
788 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
789 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
790 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
791 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
792 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
793 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
794 } else if (0 != vp_oper->state.default_vlan) {
795 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
796 /* vst QinQ should block untagged frames on TX,
797 * but the cvlan is in the payload and phv is set, so
798 * hw sees them as untagged. Block tagged instead.
800 qpc->pri_path.vlan_control |=
801 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
802 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
803 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
804 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
805 } else { /* vst 802.1Q */
806 qpc->pri_path.vlan_control |=
807 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
808 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
809 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
811 } else { /* priority tagged */
812 qpc->pri_path.vlan_control |=
813 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
814 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
817 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
818 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
819 qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
820 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
821 qpc->pri_path.fl |= MLX4_FL_SV;
823 qpc->pri_path.fl |= MLX4_FL_CV;
824 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
825 qpc->pri_path.sched_queue &= 0xC7;
826 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
827 qpc->qos_vport = vp_oper->state.qos_vport;
829 if (vp_oper->state.spoofchk) {
830 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
831 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
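/*
 * MPT indices handed to VFs carry key bits above the table index;
 * mpt_mask() strips them ("id = index & mpt_mask(dev)") so the bare
 * index can be used as the tracker id.  This assumes num_mpts is a
 * power of two, which holds for mlx4 devices.
 */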
837 static int mpt_mask(struct mlx4_dev *dev)
839 return dev->caps.num_mpts - 1;
842 static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
868 return "INVALID RESOURCE";
872 static void *find_res(struct mlx4_dev *dev, u64 res_id,
873 enum mlx4_resource type)
875 struct mlx4_priv *priv = mlx4_priv(dev);
877 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
881 static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
882 enum mlx4_resource type,
883 void *res, const char *func_name)
885 struct res_common *r;
888 spin_lock_irq(mlx4_tlock(dev));
889 r = find_res(dev, res_id, type);
895 if (r->state == RES_ANY_BUSY) {
897 "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
898 func_name, slave, res_id, mlx4_resource_type_to_str(type),
904 if (r->owner != slave) {
909 r->from_state = r->state;
910 r->state = RES_ANY_BUSY;
911 r->func_name = func_name;
914 *((struct res_common **)res) = r;
917 spin_unlock_irq(mlx4_tlock(dev));
921 #define get_res(dev, slave, res_id, type, res) \
922 _get_res((dev), (slave), (res_id), (type), (res), __func__)
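/*
 * Typical use of the busy-marking protocol (sketch, error paths
 * trimmed):
 *
 *	err = get_res(dev, slave, id, RES_MPT, &mpt);
 *	if (err)
 *		return err;
 *	...use mpt while it is held as RES_ANY_BUSY...
 *	put_res(dev, slave, id, RES_MPT);
 *
 * _get_res() records the caller's __func__ in r->func_name so that a
 * later contender can report who is currently holding the resource.
 */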
924 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
925 enum mlx4_resource type,
926 u64 res_id, int *slave)
929 struct res_common *r;
935 spin_lock(mlx4_tlock(dev));
937 r = find_res(dev, id, type);
942 spin_unlock(mlx4_tlock(dev));
947 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
948 enum mlx4_resource type)
950 struct res_common *r;
952 spin_lock_irq(mlx4_tlock(dev));
953 r = find_res(dev, res_id, type);
955 r->state = r->from_state;
958 spin_unlock_irq(mlx4_tlock(dev));
961 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
962 u64 in_param, u64 *out_param, int port);
964 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
967 struct res_common *r;
968 struct res_counter *counter;
971 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
974 spin_lock_irq(mlx4_tlock(dev));
975 r = find_res(dev, counter_index, RES_COUNTER);
976 if (!r || r->owner != slave) {
979 counter = container_of(r, struct res_counter, com);
981 counter->port = port;
984 spin_unlock_irq(mlx4_tlock(dev));
988 static int handle_unexisting_counter(struct mlx4_dev *dev,
989 struct mlx4_qp_context *qpc, u8 slave,
992 struct mlx4_priv *priv = mlx4_priv(dev);
993 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
994 struct res_common *tmp;
995 struct res_counter *counter;
996 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
999 spin_lock_irq(mlx4_tlock(dev));
1000 list_for_each_entry(tmp,
1001 &tracker->slave_list[slave].res_list[RES_COUNTER],
1003 counter = container_of(tmp, struct res_counter, com);
1004 if (port == counter->port) {
1005 qpc->pri_path.counter_index = counter->com.res_id;
1006 spin_unlock_irq(mlx4_tlock(dev));
1010 spin_unlock_irq(mlx4_tlock(dev));
1012 /* No existing counter, need to allocate a new counter */
1013 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
1015 if (err == -ENOENT) {
1017 } else if (err && err != -ENOSPC) {
1018 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
1019 __func__, slave, err);
1021 qpc->pri_path.counter_index = counter_idx;
1022 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
1023 __func__, slave, qpc->pri_path.counter_index);
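/*
 * handle_counter() below resolves the counter index in a VF's QP
 * context: a real index must already belong to that slave, while the
 * sink index is replaced by an existing per-port counter of the slave
 * or, failing that, by a freshly allocated one (the context keeps the
 * sink counter only when the pool is exhausted).
 */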
1030 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1033 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1034 return handle_existing_counter(dev, slave, port,
1035 qpc->pri_path.counter_index);
1037 return handle_unexisting_counter(dev, qpc, slave, port);
1040 static struct res_common *alloc_qp_tr(int id)
1044 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1048 ret->com.res_id = id;
1049 ret->com.state = RES_QP_RESERVED;
1050 ret->local_qpn = id;
1051 INIT_LIST_HEAD(&ret->mcg_list);
1052 spin_lock_init(&ret->mcg_spl);
1053 atomic_set(&ret->ref_count, 0);
1058 static struct res_common *alloc_mtt_tr(int id, int order)
1060 struct res_mtt *ret;
1062 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1066 ret->com.res_id = id;
1068 ret->com.state = RES_MTT_ALLOCATED;
1069 atomic_set(&ret->ref_count, 0);
1074 static struct res_common *alloc_mpt_tr(int id, int key)
1076 struct res_mpt *ret;
1078 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1082 ret->com.res_id = id;
1083 ret->com.state = RES_MPT_RESERVED;
1089 static struct res_common *alloc_eq_tr(int id)
1093 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1097 ret->com.res_id = id;
1098 ret->com.state = RES_EQ_RESERVED;
1103 static struct res_common *alloc_cq_tr(int id)
1107 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1111 ret->com.res_id = id;
1112 ret->com.state = RES_CQ_ALLOCATED;
1113 atomic_set(&ret->ref_count, 0);
1118 static struct res_common *alloc_srq_tr(int id)
1120 struct res_srq *ret;
1122 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1126 ret->com.res_id = id;
1127 ret->com.state = RES_SRQ_ALLOCATED;
1128 atomic_set(&ret->ref_count, 0);
1133 static struct res_common *alloc_counter_tr(int id, int port)
1135 struct res_counter *ret;
1137 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1141 ret->com.res_id = id;
1142 ret->com.state = RES_COUNTER_ALLOCATED;
1148 static struct res_common *alloc_xrcdn_tr(int id)
1150 struct res_xrcdn *ret;
1152 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1156 ret->com.res_id = id;
1157 ret->com.state = RES_XRCD_ALLOCATED;
1162 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1164 struct res_fs_rule *ret;
1166 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1170 ret->com.res_id = id;
1171 ret->com.state = RES_FS_RULE_ALLOCATED;
1176 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1179 struct res_common *ret;
1183 ret = alloc_qp_tr(id);
1186 ret = alloc_mpt_tr(id, extra);
1189 ret = alloc_mtt_tr(id, extra);
1192 ret = alloc_eq_tr(id);
1195 ret = alloc_cq_tr(id);
1198 ret = alloc_srq_tr(id);
1201 pr_err("implementation missing\n");
1204 ret = alloc_counter_tr(id, extra);
1207 ret = alloc_xrcdn_tr(id);
1210 ret = alloc_fs_rule_tr(id, extra);
1221 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1222 struct mlx4_counter *data)
1224 struct mlx4_priv *priv = mlx4_priv(dev);
1225 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1226 struct res_common *tmp;
1227 struct res_counter *counter;
1231 memset(data, 0, sizeof(*data));
1233 counters_arr = kmalloc_array(dev->caps.max_counters,
1234 sizeof(*counters_arr), GFP_KERNEL);
1238 spin_lock_irq(mlx4_tlock(dev));
1239 list_for_each_entry(tmp,
1240 &tracker->slave_list[slave].res_list[RES_COUNTER],
1242 counter = container_of(tmp, struct res_counter, com);
1243 if (counter->port == port) {
1244 counters_arr[i] = (int)tmp->res_id;
1248 spin_unlock_irq(mlx4_tlock(dev));
1249 counters_arr[i] = -1;
1253 while (counters_arr[i] != -1) {
1254 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1257 memset(data, 0, sizeof(*data));
1264 kfree(counters_arr);
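/*
 * add_res_range()/rem_res_range() register and unregister a contiguous
 * range of resource ids with the tracker: entries are allocated up
 * front, then inserted into the per-type rb-tree and the owner's list
 * under the tracker lock, with full rollback if any id in the range
 * already exists or an insertion fails.
 */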
1268 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1269 enum mlx4_resource type, int extra)
1273 struct mlx4_priv *priv = mlx4_priv(dev);
1274 struct res_common **res_arr;
1275 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1276 struct rb_root *root = &tracker->res_tree[type];
1278 res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
1282 for (i = 0; i < count; ++i) {
1283 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1285 for (--i; i >= 0; --i)
1293 spin_lock_irq(mlx4_tlock(dev));
1294 for (i = 0; i < count; ++i) {
1295 if (find_res(dev, base + i, type)) {
1299 err = res_tracker_insert(root, res_arr[i]);
1302 list_add_tail(&res_arr[i]->list,
1303 &tracker->slave_list[slave].res_list[type]);
1305 spin_unlock_irq(mlx4_tlock(dev));
1311 for (--i; i >= 0; --i) {
1312 rb_erase(&res_arr[i]->node, root);
1313 list_del_init(&res_arr[i]->list);
1316 spin_unlock_irq(mlx4_tlock(dev));
1318 for (i = 0; i < count; ++i)
1326 static int remove_qp_ok(struct res_qp *res)
1328 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1329 !list_empty(&res->mcg_list)) {
1330 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1331 res->com.state, atomic_read(&res->ref_count));
1333 } else if (res->com.state != RES_QP_RESERVED) {
1340 static int remove_mtt_ok(struct res_mtt *res, int order)
1342 if (res->com.state == RES_MTT_BUSY ||
1343 atomic_read(&res->ref_count)) {
1344 pr_devel("%s-%d: state %s, ref_count %d\n",
1346 mtt_states_str(res->com.state),
1347 atomic_read(&res->ref_count));
1349 } else if (res->com.state != RES_MTT_ALLOCATED)
1351 else if (res->order != order)
1357 static int remove_mpt_ok(struct res_mpt *res)
1359 if (res->com.state == RES_MPT_BUSY)
1361 else if (res->com.state != RES_MPT_RESERVED)
1367 static int remove_eq_ok(struct res_eq *res)
1369 if (res->com.state == RES_EQ_BUSY)
1371 else if (res->com.state != RES_EQ_RESERVED)
1377 static int remove_counter_ok(struct res_counter *res)
1379 if (res->com.state == RES_COUNTER_BUSY)
1381 else if (res->com.state != RES_COUNTER_ALLOCATED)
1387 static int remove_xrcdn_ok(struct res_xrcdn *res)
1389 if (res->com.state == RES_XRCD_BUSY)
1391 else if (res->com.state != RES_XRCD_ALLOCATED)
1397 static int remove_fs_rule_ok(struct res_fs_rule *res)
1399 if (res->com.state == RES_FS_RULE_BUSY)
1401 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1407 static int remove_cq_ok(struct res_cq *res)
1409 if (res->com.state == RES_CQ_BUSY)
1411 else if (res->com.state != RES_CQ_ALLOCATED)
1417 static int remove_srq_ok(struct res_srq *res)
1419 if (res->com.state == RES_SRQ_BUSY)
1421 else if (res->com.state != RES_SRQ_ALLOCATED)
1427 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1431 return remove_qp_ok((struct res_qp *)res);
1433 return remove_cq_ok((struct res_cq *)res);
1435 return remove_srq_ok((struct res_srq *)res);
1437 return remove_mpt_ok((struct res_mpt *)res);
1439 return remove_mtt_ok((struct res_mtt *)res, extra);
1443 return remove_eq_ok((struct res_eq *)res);
1445 return remove_counter_ok((struct res_counter *)res);
1447 return remove_xrcdn_ok((struct res_xrcdn *)res);
1449 return remove_fs_rule_ok((struct res_fs_rule *)res);
1455 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1456 enum mlx4_resource type, int extra)
1460 struct mlx4_priv *priv = mlx4_priv(dev);
1461 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1462 struct res_common *r;
1464 spin_lock_irq(mlx4_tlock(dev));
1465 for (i = base; i < base + count; ++i) {
1466 r = res_tracker_lookup(&tracker->res_tree[type], i);
1471 if (r->owner != slave) {
1475 err = remove_ok(r, type, extra);
1480 for (i = base; i < base + count; ++i) {
1481 r = res_tracker_lookup(&tracker->res_tree[type], i);
1482 rb_erase(&r->node, &tracker->res_tree[type]);
1489 spin_unlock_irq(mlx4_tlock(dev));
1494 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1495 enum res_qp_states state, struct res_qp **qp,
1498 struct mlx4_priv *priv = mlx4_priv(dev);
1499 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1503 spin_lock_irq(mlx4_tlock(dev));
1504 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1507 else if (r->com.owner != slave)
1512 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1513 __func__, r->com.res_id);
1517 case RES_QP_RESERVED:
1518 if (r->com.state == RES_QP_MAPPED && !alloc)
1521 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1526 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1527 r->com.state == RES_QP_HW)
1530 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1538 if (r->com.state != RES_QP_MAPPED)
1546 r->com.from_state = r->com.state;
1547 r->com.to_state = state;
1548 r->com.state = RES_QP_BUSY;
1554 spin_unlock_irq(mlx4_tlock(dev));
1559 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1560 enum res_mpt_states state, struct res_mpt **mpt)
1562 struct mlx4_priv *priv = mlx4_priv(dev);
1563 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1567 spin_lock_irq(mlx4_tlock(dev));
1568 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1571 else if (r->com.owner != slave)
1579 case RES_MPT_RESERVED:
1580 if (r->com.state != RES_MPT_MAPPED)
1584 case RES_MPT_MAPPED:
1585 if (r->com.state != RES_MPT_RESERVED &&
1586 r->com.state != RES_MPT_HW)
1591 if (r->com.state != RES_MPT_MAPPED)
1599 r->com.from_state = r->com.state;
1600 r->com.to_state = state;
1601 r->com.state = RES_MPT_BUSY;
1607 spin_unlock_irq(mlx4_tlock(dev));
1612 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1613 enum res_eq_states state, struct res_eq **eq)
1615 struct mlx4_priv *priv = mlx4_priv(dev);
1616 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1620 spin_lock_irq(mlx4_tlock(dev));
1621 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1624 else if (r->com.owner != slave)
1632 case RES_EQ_RESERVED:
1633 if (r->com.state != RES_EQ_HW)
1638 if (r->com.state != RES_EQ_RESERVED)
1647 r->com.from_state = r->com.state;
1648 r->com.to_state = state;
1649 r->com.state = RES_EQ_BUSY;
1653 spin_unlock_irq(mlx4_tlock(dev));
1661 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1662 enum res_cq_states state, struct res_cq **cq)
1664 struct mlx4_priv *priv = mlx4_priv(dev);
1665 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1669 spin_lock_irq(mlx4_tlock(dev));
1670 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1673 } else if (r->com.owner != slave) {
1675 } else if (state == RES_CQ_ALLOCATED) {
1676 if (r->com.state != RES_CQ_HW)
1678 else if (atomic_read(&r->ref_count))
1682 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1689 r->com.from_state = r->com.state;
1690 r->com.to_state = state;
1691 r->com.state = RES_CQ_BUSY;
1696 spin_unlock_irq(mlx4_tlock(dev));
1701 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1702 enum res_srq_states state, struct res_srq **srq)
1704 struct mlx4_priv *priv = mlx4_priv(dev);
1705 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1709 spin_lock_irq(mlx4_tlock(dev));
1710 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1713 } else if (r->com.owner != slave) {
1715 } else if (state == RES_SRQ_ALLOCATED) {
1716 if (r->com.state != RES_SRQ_HW)
1718 else if (atomic_read(&r->ref_count))
1720 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1725 r->com.from_state = r->com.state;
1726 r->com.to_state = state;
1727 r->com.state = RES_SRQ_BUSY;
1732 spin_unlock_irq(mlx4_tlock(dev));
1737 static void res_abort_move(struct mlx4_dev *dev, int slave,
1738 enum mlx4_resource type, int id)
1740 struct mlx4_priv *priv = mlx4_priv(dev);
1741 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1742 struct res_common *r;
1744 spin_lock_irq(mlx4_tlock(dev));
1745 r = res_tracker_lookup(&tracker->res_tree[type], id);
1746 if (r && (r->owner == slave))
1747 r->state = r->from_state;
1748 spin_unlock_irq(mlx4_tlock(dev));
1751 static void res_end_move(struct mlx4_dev *dev, int slave,
1752 enum mlx4_resource type, int id)
1754 struct mlx4_priv *priv = mlx4_priv(dev);
1755 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1756 struct res_common *r;
1758 spin_lock_irq(mlx4_tlock(dev));
1759 r = res_tracker_lookup(&tracker->res_tree[type], id);
1760 if (r && (r->owner == slave))
1761 r->state = r->to_state;
1762 spin_unlock_irq(mlx4_tlock(dev));
1765 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1767 return mlx4_is_qp_reserved(dev, qpn) &&
1768 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1771 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1773 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
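/*
 * QP allocation on behalf of a VF happens in two FW command steps:
 * RES_OP_RESERVE charges the slave's RES_QP quota and reserves a qpn
 * range, and RES_OP_MAP_ICM moves an individual qpn from RESERVED to
 * MAPPED, backing it with ICM unless the qpn is firmware-reserved.
 */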
1776 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1777 u64 in_param, u64 *out_param)
1787 case RES_OP_RESERVE:
1788 count = get_param_l(&in_param) & 0xffffff;
1789 /* Turn off all unsupported QP allocation flags that the
1790 * slave tries to set.
1792 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1793 align = get_param_h(&in_param);
1794 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1798 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1800 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1804 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1806 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1807 __mlx4_qp_release_range(dev, base, count);
1810 set_param_l(out_param, base);
1812 case RES_OP_MAP_ICM:
1813 qpn = get_param_l(&in_param) & 0x7fffff;
1814 if (valid_reserved(dev, slave, qpn)) {
1815 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1820 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1825 if (!fw_reserved(dev, qpn)) {
1826 err = __mlx4_qp_alloc_icm(dev, qpn);
1828 res_abort_move(dev, slave, RES_QP, qpn);
1833 res_end_move(dev, slave, RES_QP, qpn);
1843 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1844 u64 in_param, u64 *out_param)
1850 if (op != RES_OP_RESERVE_AND_MAP)
1853 order = get_param_l(&in_param);
1855 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1859 base = __mlx4_alloc_mtt_range(dev, order);
1861 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1865 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1867 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1868 __mlx4_free_mtt_range(dev, base, order);
1870 set_param_l(out_param, base);
1876 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1877 u64 in_param, u64 *out_param)
1882 struct res_mpt *mpt;
1885 case RES_OP_RESERVE:
1886 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1890 index = __mlx4_mpt_reserve(dev);
1892 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1895 id = index & mpt_mask(dev);
1897 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1899 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1900 __mlx4_mpt_release(dev, index);
1903 set_param_l(out_param, index);
1905 case RES_OP_MAP_ICM:
1906 index = get_param_l(&in_param);
1907 id = index & mpt_mask(dev);
1908 err = mr_res_start_move_to(dev, slave, id,
1909 RES_MPT_MAPPED, &mpt);
1913 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1915 res_abort_move(dev, slave, RES_MPT, id);
1919 res_end_move(dev, slave, RES_MPT, id);
1925 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1926 u64 in_param, u64 *out_param)
1932 case RES_OP_RESERVE_AND_MAP:
1933 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1937 err = __mlx4_cq_alloc_icm(dev, &cqn);
1939 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1943 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1945 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1946 __mlx4_cq_free_icm(dev, cqn);
1950 set_param_l(out_param, cqn);
1960 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1961 u64 in_param, u64 *out_param)
1967 case RES_OP_RESERVE_AND_MAP:
1968 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1972 err = __mlx4_srq_alloc_icm(dev, &srqn);
1974 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1978 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1980 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1981 __mlx4_srq_free_icm(dev, srqn);
1985 set_param_l(out_param, srqn);
1995 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1996 u8 smac_index, u64 *mac)
1998 struct mlx4_priv *priv = mlx4_priv(dev);
1999 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2000 struct list_head *mac_list =
2001 &tracker->slave_list[slave].res_list[RES_MAC];
2002 struct mac_res *res, *tmp;
2004 list_for_each_entry_safe(res, tmp, mac_list, list) {
2005 if (res->smac_index == smac_index && res->port == (u8) port) {
2013 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2015 struct mlx4_priv *priv = mlx4_priv(dev);
2016 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2017 struct list_head *mac_list =
2018 &tracker->slave_list[slave].res_list[RES_MAC];
2019 struct mac_res *res, *tmp;
2021 list_for_each_entry_safe(res, tmp, mac_list, list) {
2022 if (res->mac == mac && res->port == (u8) port) {
2023 /* mac found. update ref count */
2029 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2031 res = kzalloc(sizeof(*res), GFP_KERNEL);
2033 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2037 res->port = (u8) port;
2038 res->smac_index = smac_index;
2040 list_add_tail(&res->list,
2041 &tracker->slave_list[slave].res_list[RES_MAC]);
2045 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2048 struct mlx4_priv *priv = mlx4_priv(dev);
2049 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2050 struct list_head *mac_list =
2051 &tracker->slave_list[slave].res_list[RES_MAC];
2052 struct mac_res *res, *tmp;
2054 list_for_each_entry_safe(res, tmp, mac_list, list) {
2055 if (res->mac == mac && res->port == (u8) port) {
2056 if (!--res->ref_count) {
2057 list_del(&res->list);
2058 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2066 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2068 struct mlx4_priv *priv = mlx4_priv(dev);
2069 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2070 struct list_head *mac_list =
2071 &tracker->slave_list[slave].res_list[RES_MAC];
2072 struct mac_res *res, *tmp;
2075 list_for_each_entry_safe(res, tmp, mac_list, list) {
2076 list_del(&res->list);
2077 /* dereference the mac as many times as the slave referenced it */
2078 for (i = 0; i < res->ref_count; i++)
2079 __mlx4_unregister_mac(dev, res->port, res->mac);
2080 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2085 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2086 u64 in_param, u64 *out_param, int in_port)
2093 if (op != RES_OP_RESERVE_AND_MAP)
2096 port = !in_port ? get_param_l(out_param) : in_port;
2097 port = mlx4_slave_convert_port(
2104 err = __mlx4_register_mac(dev, port, mac);
2107 set_param_l(out_param, err);
2112 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2114 __mlx4_unregister_mac(dev, port, mac);
2119 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2120 int port, int vlan_index)
2122 struct mlx4_priv *priv = mlx4_priv(dev);
2123 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2124 struct list_head *vlan_list =
2125 &tracker->slave_list[slave].res_list[RES_VLAN];
2126 struct vlan_res *res, *tmp;
2128 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2129 if (res->vlan == vlan && res->port == (u8) port) {
2130 /* vlan found. update ref count */
2136 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2138 res = kzalloc(sizeof(*res), GFP_KERNEL);
2140 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2144 res->port = (u8) port;
2145 res->vlan_index = vlan_index;
2147 list_add_tail(&res->list,
2148 &tracker->slave_list[slave].res_list[RES_VLAN]);
2153 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2156 struct mlx4_priv *priv = mlx4_priv(dev);
2157 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2158 struct list_head *vlan_list =
2159 &tracker->slave_list[slave].res_list[RES_VLAN];
2160 struct vlan_res *res, *tmp;
2162 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2163 if (res->vlan == vlan && res->port == (u8) port) {
2164 if (!--res->ref_count) {
2165 list_del(&res->list);
2166 mlx4_release_resource(dev, slave, RES_VLAN,
2175 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2177 struct mlx4_priv *priv = mlx4_priv(dev);
2178 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2179 struct list_head *vlan_list =
2180 &tracker->slave_list[slave].res_list[RES_VLAN];
2181 struct vlan_res *res, *tmp;
2184 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2185 list_del(&res->list);
2186 /* dereference the vlan as many times as the slave referenced it */
2187 for (i = 0; i < res->ref_count; i++)
2188 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2189 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2194 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2195 u64 in_param, u64 *out_param, int in_port)
2197 struct mlx4_priv *priv = mlx4_priv(dev);
2198 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2204 port = !in_port ? get_param_l(out_param) : in_port;
2206 if (!port || op != RES_OP_RESERVE_AND_MAP)
2209 port = mlx4_slave_convert_port(
2214 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2215 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2216 slave_state[slave].old_vlan_api = true;
2220 vlan = (u16) in_param;
2222 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2224 set_param_l(out_param, (u32) vlan_index);
2225 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2227 __mlx4_unregister_vlan(dev, port, vlan);
2232 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2233 u64 in_param, u64 *out_param, int port)
2238 if (op != RES_OP_RESERVE)
2241 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2245 err = __mlx4_counter_alloc(dev, &index);
2247 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2251 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2253 __mlx4_counter_free(dev, index);
2254 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2256 set_param_l(out_param, index);
2262 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2263 u64 in_param, u64 *out_param)
2268 if (op != RES_OP_RESERVE)
2271 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2275 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2277 __mlx4_xrcd_free(dev, xrcdn);
2279 set_param_l(out_param, xrcdn);
2284 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2285 struct mlx4_vhcr *vhcr,
2286 struct mlx4_cmd_mailbox *inbox,
2287 struct mlx4_cmd_mailbox *outbox,
2288 struct mlx4_cmd_info *cmd)
2291 int alop = vhcr->op_modifier;
2293 switch (vhcr->in_modifier & 0xFF) {
2295 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2296 vhcr->in_param, &vhcr->out_param);
2300 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2301 vhcr->in_param, &vhcr->out_param);
2305 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306 vhcr->in_param, &vhcr->out_param);
2310 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2311 vhcr->in_param, &vhcr->out_param);
2315 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2316 vhcr->in_param, &vhcr->out_param);
2320 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2321 vhcr->in_param, &vhcr->out_param,
2322 (vhcr->in_modifier >> 8) & 0xFF);
2326 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2327 vhcr->in_param, &vhcr->out_param,
2328 (vhcr->in_modifier >> 8) & 0xFF);
2332 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2333 vhcr->in_param, &vhcr->out_param, 0);
2337 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2338 vhcr->in_param, &vhcr->out_param);
2349 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2358 case RES_OP_RESERVE:
2359 base = get_param_l(&in_param) & 0x7fffff;
2360 count = get_param_h(&in_param);
2361 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2364 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2365 __mlx4_qp_release_range(dev, base, count);
2367 case RES_OP_MAP_ICM:
2368 qpn = get_param_l(&in_param) & 0x7fffff;
2369 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2374 if (!fw_reserved(dev, qpn))
2375 __mlx4_qp_free_icm(dev, qpn);
2377 res_end_move(dev, slave, RES_QP, qpn);
2379 if (valid_reserved(dev, slave, qpn))
2380 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2389 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2390 u64 in_param, u64 *out_param)
2396 if (op != RES_OP_RESERVE_AND_MAP)
2399 base = get_param_l(&in_param);
2400 order = get_param_h(&in_param);
2401 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2403 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2404 __mlx4_free_mtt_range(dev, base, order);
2409 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2415 struct res_mpt *mpt;
2418 case RES_OP_RESERVE:
2419 index = get_param_l(&in_param);
2420 id = index & mpt_mask(dev);
2421 err = get_res(dev, slave, id, RES_MPT, &mpt);
2425 put_res(dev, slave, id, RES_MPT);
2427 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2430 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2431 __mlx4_mpt_release(dev, index);
2433 case RES_OP_MAP_ICM:
2434 index = get_param_l(&in_param);
2435 id = index & mpt_mask(dev);
2436 err = mr_res_start_move_to(dev, slave, id,
2437 RES_MPT_RESERVED, &mpt);
2441 __mlx4_mpt_free_icm(dev, mpt->key);
2442 res_end_move(dev, slave, RES_MPT, id);
2451 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2452 u64 in_param, u64 *out_param)
2458 case RES_OP_RESERVE_AND_MAP:
2459 cqn = get_param_l(&in_param);
2460 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2464 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2465 __mlx4_cq_free_icm(dev, cqn);
2476 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2477 u64 in_param, u64 *out_param)
2483 case RES_OP_RESERVE_AND_MAP:
2484 srqn = get_param_l(&in_param);
2485 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2489 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2490 __mlx4_srq_free_icm(dev, srqn);
2501 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2502 u64 in_param, u64 *out_param, int in_port)
2508 case RES_OP_RESERVE_AND_MAP:
2509 port = !in_port ? get_param_l(out_param) : in_port;
2510 port = mlx4_slave_convert_port(
2515 mac_del_from_slave(dev, slave, in_param, port);
2516 __mlx4_unregister_mac(dev, port, in_param);
2527 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2528 u64 in_param, u64 *out_param, int port)
2530 struct mlx4_priv *priv = mlx4_priv(dev);
2531 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2534 port = mlx4_slave_convert_port(
2540 case RES_OP_RESERVE_AND_MAP:
2541 if (slave_state[slave].old_vlan_api)
2545 vlan_del_from_slave(dev, slave, in_param, port);
2546 __mlx4_unregister_vlan(dev, port, in_param);
2556 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2557 u64 in_param, u64 *out_param)
2562 if (op != RES_OP_RESERVE)
2565 index = get_param_l(&in_param);
2566 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2569 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2573 __mlx4_counter_free(dev, index);
2574 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2579 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2580 u64 in_param, u64 *out_param)
2585 if (op != RES_OP_RESERVE)
2588 xrcdn = get_param_l(&in_param);
2589 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2593 __mlx4_xrcd_free(dev, xrcdn);
2598 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2599 struct mlx4_vhcr *vhcr,
2600 struct mlx4_cmd_mailbox *inbox,
2601 struct mlx4_cmd_mailbox *outbox,
2602 struct mlx4_cmd_info *cmd)
2605 int alop = vhcr->op_modifier;
2607 switch (vhcr->in_modifier & 0xFF) {
2609 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2614 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2615 vhcr->in_param, &vhcr->out_param);
2619 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2624 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2625 vhcr->in_param, &vhcr->out_param);
2629 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2630 vhcr->in_param, &vhcr->out_param);
2634 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2635 vhcr->in_param, &vhcr->out_param,
2636 (vhcr->in_modifier >> 8) & 0xFF);
2640 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2641 vhcr->in_param, &vhcr->out_param,
2642 (vhcr->in_modifier >> 8) & 0xFF);
2646 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2647 vhcr->in_param, &vhcr->out_param);
2651 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2652 vhcr->in_param, &vhcr->out_param);
2660 /* ugly but other choices are uglier */
2661 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2663 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2666 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2668 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2671 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2673 return be32_to_cpu(mpt->mtt_sz);
2676 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2678 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2681 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2683 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2686 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2688 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2691 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2693 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2696 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2698 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2701 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2703 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2706 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2708 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2709 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2710 int log_sq_stride = qpc->sq_size_stride & 7;
2711 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2712 int log_rq_stride = qpc->rq_size_stride & 7;
2713 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2714 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2715 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2716 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2721 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2723 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2724 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2725 total_mem = sq_size + rq_size;
2727 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
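/*
 * Rough worked example for qp_get_mtt_size() above (made-up values):
 * log_sq_size = 6, log_sq_stride = 2 and log_rq_size = 6,
 * log_rq_stride = 2 with no SRQ/RSS/XRC give 4 KB + 4 KB = 8 KB of
 * queue memory; with page_shift = 12 and page_offset = 0 that spans
 * two pages, i.e. two MTT entries.
 */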
2733 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2734 int size, struct res_mtt *mtt)
2736 int res_start = mtt->com.res_id;
2737 int res_size = (1 << mtt->order);
2739 if (start < res_start || start + size > res_start + res_size)
2744 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2745 struct mlx4_vhcr *vhcr,
2746 struct mlx4_cmd_mailbox *inbox,
2747 struct mlx4_cmd_mailbox *outbox,
2748 struct mlx4_cmd_info *cmd)
2751 int index = vhcr->in_modifier;
2752 struct res_mtt *mtt;
2753 struct res_mpt *mpt = NULL;
2754 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2760 id = index & mpt_mask(dev);
2761 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2765 /* Disable memory windows for VFs. */
2766 if (!mr_is_region(inbox->buf)) {
2771 /* Make sure that the PD bits related to the slave id are zeros. */
2772 pd = mr_get_pd(inbox->buf);
2773 pd_slave = (pd >> 17) & 0x7f;
2774 if (pd_slave != 0 && --pd_slave != slave) {
2779 if (mr_is_fmr(inbox->buf)) {
2780 /* FMR and Bind Enable are forbidden in slave devices. */
2781 if (mr_is_bind_enabled(inbox->buf)) {
2785 /* FMR and Memory Windows are also forbidden. */
2786 if (!mr_is_region(inbox->buf)) {
2792 phys = mr_phys_mpt(inbox->buf);
2794 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2798 err = check_mtt_range(dev, slave, mtt_base,
2799 mr_get_mtt_size(inbox->buf), mtt);
2806 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2811 atomic_inc(&mtt->ref_count);
2812 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2815 res_end_move(dev, slave, RES_MPT, id);
2820 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2822 res_abort_move(dev, slave, RES_MPT, id);
2827 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2828 struct mlx4_vhcr *vhcr,
2829 struct mlx4_cmd_mailbox *inbox,
2830 struct mlx4_cmd_mailbox *outbox,
2831 struct mlx4_cmd_info *cmd)
2834 int index = vhcr->in_modifier;
2835 struct res_mpt *mpt;
2838 id = index & mpt_mask(dev);
2839 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2843 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2848 atomic_dec(&mpt->mtt->ref_count);
2850 res_end_move(dev, slave, RES_MPT, id);
2854 res_abort_move(dev, slave, RES_MPT, id);
2859 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2860 struct mlx4_vhcr *vhcr,
2861 struct mlx4_cmd_mailbox *inbox,
2862 struct mlx4_cmd_mailbox *outbox,
2863 struct mlx4_cmd_info *cmd)
2866 int index = vhcr->in_modifier;
2867 struct res_mpt *mpt;
2870 id = index & mpt_mask(dev);
2871 err = get_res(dev, slave, id, RES_MPT, &mpt);
2875 if (mpt->com.from_state == RES_MPT_MAPPED) {
2876 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2877 * that, the VF must read the MPT. But since the MPT entry memory is not
2878 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2879 * entry contents. To guarantee that the MPT cannot be changed, the driver
2880 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2881 * ownership following the change. The change here allows the VF to
2882 * perform QUERY_MPT also when the entry is in SW ownership.
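 * The expected flow on the VF side is therefore roughly: HW2SW_MPT to move
 * the entry to SW ownership, QUERY_MPT to read it, modify the entry, then
 * SW2HW_MPT to return it to HW ownership.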
2884 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2885 &mlx4_priv(dev)->mr_table.dmpt_table,
2888 if (NULL == mpt_entry || NULL == outbox->buf) {
2893 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2896 } else if (mpt->com.from_state == RES_MPT_HW) {
2897 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2905 put_res(dev, slave, id, RES_MPT);
2909 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2911 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2914 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2916 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2919 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2921 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2924 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2925 struct mlx4_qp_context *context)
2927 u32 qpn = vhcr->in_modifier & 0xffffff;
2930 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2933 /* adjust qkey in qp context */
2934 context->qkey = cpu_to_be32(qkey);
2937 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2938 struct mlx4_qp_context *qpc,
2939 struct mlx4_cmd_mailbox *inbox);
2941 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2942 struct mlx4_vhcr *vhcr,
2943 struct mlx4_cmd_mailbox *inbox,
2944 struct mlx4_cmd_mailbox *outbox,
2945 struct mlx4_cmd_info *cmd)
2948 int qpn = vhcr->in_modifier & 0x7fffff;
2949 struct res_mtt *mtt;
2951 struct mlx4_qp_context *qpc = inbox->buf + 8;
2952 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2953 int mtt_size = qp_get_mtt_size(qpc);
2956 int rcqn = qp_get_rcqn(qpc);
2957 int scqn = qp_get_scqn(qpc);
2958 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2959 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2960 struct res_srq *srq;
2961 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2963 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2967 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2970 qp->local_qpn = local_qpn;
2971 qp->sched_queue = 0;
2973 qp->vlan_control = 0;
2975 qp->pri_path_fl = 0;
2978 qp->qpc_flags = be32_to_cpu(qpc->flags);
2980 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2984 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2988 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2993 err = get_res(dev, slave, scqn, RES_CQ, &scq);
3000 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3005 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3006 update_pkey_index(dev, slave, inbox);
3007 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3010 atomic_inc(&mtt->ref_count);
3012 atomic_inc(&rcq->ref_count);
3014 atomic_inc(&scq->ref_count);
3018 put_res(dev, slave, scqn, RES_CQ);
3021 atomic_inc(&srq->ref_count);
3022 put_res(dev, slave, srqn, RES_SRQ);
3026 /* Save param3 for dynamic changes from VST back to VGT */
3027 qp->param3 = qpc->param3;
3028 put_res(dev, slave, rcqn, RES_CQ);
3029 put_res(dev, slave, mtt_base, RES_MTT);
3030 res_end_move(dev, slave, RES_QP, qpn);
3036 put_res(dev, slave, srqn, RES_SRQ);
3039 put_res(dev, slave, scqn, RES_CQ);
3041 put_res(dev, slave, rcqn, RES_CQ);
3043 put_res(dev, slave, mtt_base, RES_MTT);
3045 res_abort_move(dev, slave, RES_QP, qpn);
3050 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3052 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3055 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3057 int log_eq_size = eqc->log_eq_size & 0x1f;
3058 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3060 if (log_eq_size + 5 < page_shift)
3063 return 1 << (log_eq_size + 5 - page_shift);
3066 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3068 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3071 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3073 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3074 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3076 if (log_cq_size + 5 < page_shift)
3079 return 1 << (log_cq_size + 5 - page_shift);
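/*
 * In both eq_get_mtt_size() and cq_get_mtt_size() above, the "+ 5" presumably
 * reflects 32-byte (1 << 5) queue entries: the queue occupies
 * (1 << log_size) * 32 bytes, and the helpers return how many pages (and thus
 * MTT entries) that spans. For example, a CQ with log_cq_size = 10 and 4KB
 * pages needs 1 << (10 + 5 - 12) = 8 MTT entries.
 */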
3082 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3083 struct mlx4_vhcr *vhcr,
3084 struct mlx4_cmd_mailbox *inbox,
3085 struct mlx4_cmd_mailbox *outbox,
3086 struct mlx4_cmd_info *cmd)
3089 int eqn = vhcr->in_modifier;
3090 int res_id = (slave << 10) | eqn;
3091 struct mlx4_eq_context *eqc = inbox->buf;
3092 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3093 int mtt_size = eq_get_mtt_size(eqc);
3095 struct res_mtt *mtt;
3097 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3100 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3104 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3108 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3112 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3116 atomic_inc(&mtt->ref_count);
3118 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3119 res_end_move(dev, slave, RES_EQ, res_id);
3123 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3125 res_abort_move(dev, slave, RES_EQ, res_id);
3127 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3131 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3132 struct mlx4_vhcr *vhcr,
3133 struct mlx4_cmd_mailbox *inbox,
3134 struct mlx4_cmd_mailbox *outbox,
3135 struct mlx4_cmd_info *cmd)
3138 u8 get = vhcr->op_modifier;
3143 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3148 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3149 int len, struct res_mtt **res)
3151 struct mlx4_priv *priv = mlx4_priv(dev);
3152 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3153 struct res_mtt *mtt;
3156 spin_lock_irq(mlx4_tlock(dev));
3157 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3159 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3161 mtt->com.from_state = mtt->com.state;
3162 mtt->com.state = RES_MTT_BUSY;
3167 spin_unlock_irq(mlx4_tlock(dev));
3172 static int verify_qp_parameters(struct mlx4_dev *dev,
3173 struct mlx4_vhcr *vhcr,
3174 struct mlx4_cmd_mailbox *inbox,
3175 enum qp_transition transition, u8 slave)
3179 struct mlx4_qp_context *qp_ctx;
3180 enum mlx4_qp_optpar optpar;
3184 qp_ctx = inbox->buf + 8;
3185 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3186 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3188 if (slave != mlx4_master_func_num(dev)) {
3189 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
3190 /* setting QP rate-limit is disallowed for VFs */
3191 if (qp_ctx->rate_limit_params)
3197 case MLX4_QP_ST_XRC:
3199 switch (transition) {
3200 case QP_TRANS_INIT2RTR:
3201 case QP_TRANS_RTR2RTS:
3202 case QP_TRANS_RTS2RTS:
3203 case QP_TRANS_SQD2SQD:
3204 case QP_TRANS_SQD2RTS:
3205 if (slave != mlx4_master_func_num(dev)) {
3206 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3207 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3208 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3209 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3212 if (qp_ctx->pri_path.mgid_index >= num_gids)
3215 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3216 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3217 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3218 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3221 if (qp_ctx->alt_path.mgid_index >= num_gids)
3231 case MLX4_QP_ST_MLX:
3232 qpn = vhcr->in_modifier & 0x7fffff;
3233 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3234 if (transition == QP_TRANS_INIT2RTR &&
3235 slave != mlx4_master_func_num(dev) &&
3236 mlx4_is_qp_reserved(dev, qpn) &&
3237 !mlx4_vf_smi_enabled(dev, slave, port)) {
3238 /* only enabled VFs may create MLX proxy QPs */
3239 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3240 __func__, slave, port);
3252 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3253 struct mlx4_vhcr *vhcr,
3254 struct mlx4_cmd_mailbox *inbox,
3255 struct mlx4_cmd_mailbox *outbox,
3256 struct mlx4_cmd_info *cmd)
3258 struct mlx4_mtt mtt;
3259 __be64 *page_list = inbox->buf;
3260 u64 *pg_list = (u64 *)page_list;
3262 struct res_mtt *rmtt = NULL;
3263 int start = be64_to_cpu(page_list[0]);
3264 int npages = vhcr->in_modifier;
3267 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3271 /* Call the SW implementation of write_mtt:
3272 * - Prepare a dummy mtt struct
3273 * - Translate inbox contents to simple addresses in host endianness */
3274 mtt.offset = 0; /* TBD: this is broken, but it is not handled here
3275 since the offset is not actually used */
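/* As used below, the inbox appears to be laid out as: page_list[0] holds the
 * first MTT index to write, page_list[1] is skipped, and page_list[2..] hold
 * the page addresses themselves (bit 0 of each address is cleared before they
 * are handed to the SW write path).
 */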
3278 for (i = 0; i < npages; ++i)
3279 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3281 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3282 ((u64 *)page_list + 2));
3285 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3290 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3291 struct mlx4_vhcr *vhcr,
3292 struct mlx4_cmd_mailbox *inbox,
3293 struct mlx4_cmd_mailbox *outbox,
3294 struct mlx4_cmd_info *cmd)
3296 int eqn = vhcr->in_modifier;
3297 int res_id = eqn | (slave << 10);
3301 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3305 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3309 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3313 atomic_dec(&eq->mtt->ref_count);
3314 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3315 res_end_move(dev, slave, RES_EQ, res_id);
3316 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3321 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3323 res_abort_move(dev, slave, RES_EQ, res_id);
3328 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3330 struct mlx4_priv *priv = mlx4_priv(dev);
3331 struct mlx4_slave_event_eq_info *event_eq;
3332 struct mlx4_cmd_mailbox *mailbox;
3333 u32 in_modifier = 0;
3338 if (!priv->mfunc.master.slave_state)
3341 /* check for slave valid, slave not PF, and slave active */
3342 if (slave < 0 || slave > dev->persist->num_vfs ||
3343 slave == dev->caps.function ||
3344 !priv->mfunc.master.slave_state[slave].active)
3347 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3349 /* Create the event only if the slave is registered */
3350 if (event_eq->eqn < 0)
3353 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3354 res_id = (slave << 10) | event_eq->eqn;
3355 err = get_res(dev, slave, res_id, RES_EQ, &req);
3359 if (req->com.from_state != RES_EQ_HW) {
3364 mailbox = mlx4_alloc_cmd_mailbox(dev);
3365 if (IS_ERR(mailbox)) {
3366 err = PTR_ERR(mailbox);
3370 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3372 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3375 memcpy(mailbox->buf, (u8 *) eqe, 28);
3377 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
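/* The GEN_EQE in_modifier built above encodes the target slave in bits 0-7
 * and the slave's EQ number in bits 16-25.
 */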
3379 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3380 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3383 put_res(dev, slave, res_id, RES_EQ);
3384 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3385 mlx4_free_cmd_mailbox(dev, mailbox);
3389 put_res(dev, slave, res_id, RES_EQ);
3392 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3396 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3397 struct mlx4_vhcr *vhcr,
3398 struct mlx4_cmd_mailbox *inbox,
3399 struct mlx4_cmd_mailbox *outbox,
3400 struct mlx4_cmd_info *cmd)
3402 int eqn = vhcr->in_modifier;
3403 int res_id = eqn | (slave << 10);
3407 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3411 if (eq->com.from_state != RES_EQ_HW) {
3416 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3419 put_res(dev, slave, res_id, RES_EQ);
3423 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3424 struct mlx4_vhcr *vhcr,
3425 struct mlx4_cmd_mailbox *inbox,
3426 struct mlx4_cmd_mailbox *outbox,
3427 struct mlx4_cmd_info *cmd)
3430 int cqn = vhcr->in_modifier;
3431 struct mlx4_cq_context *cqc = inbox->buf;
3432 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3433 struct res_cq *cq = NULL;
3434 struct res_mtt *mtt;
3436 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3439 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3442 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3445 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3448 atomic_inc(&mtt->ref_count);
3450 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3451 res_end_move(dev, slave, RES_CQ, cqn);
3455 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3457 res_abort_move(dev, slave, RES_CQ, cqn);
3461 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3462 struct mlx4_vhcr *vhcr,
3463 struct mlx4_cmd_mailbox *inbox,
3464 struct mlx4_cmd_mailbox *outbox,
3465 struct mlx4_cmd_info *cmd)
3468 int cqn = vhcr->in_modifier;
3469 struct res_cq *cq = NULL;
3471 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3474 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3477 atomic_dec(&cq->mtt->ref_count);
3478 res_end_move(dev, slave, RES_CQ, cqn);
3482 res_abort_move(dev, slave, RES_CQ, cqn);
3486 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3487 struct mlx4_vhcr *vhcr,
3488 struct mlx4_cmd_mailbox *inbox,
3489 struct mlx4_cmd_mailbox *outbox,
3490 struct mlx4_cmd_info *cmd)
3492 int cqn = vhcr->in_modifier;
3496 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3500 if (cq->com.from_state != RES_CQ_HW)
3503 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3505 put_res(dev, slave, cqn, RES_CQ);
3510 static int handle_resize(struct mlx4_dev *dev, int slave,
3511 struct mlx4_vhcr *vhcr,
3512 struct mlx4_cmd_mailbox *inbox,
3513 struct mlx4_cmd_mailbox *outbox,
3514 struct mlx4_cmd_info *cmd,
3518 struct res_mtt *orig_mtt;
3519 struct res_mtt *mtt;
3520 struct mlx4_cq_context *cqc = inbox->buf;
3521 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3523 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3527 if (orig_mtt != cq->mtt) {
3532 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3536 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3539 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3542 atomic_dec(&orig_mtt->ref_count);
3543 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3544 atomic_inc(&mtt->ref_count);
3546 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3550 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3552 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3558 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3559 struct mlx4_vhcr *vhcr,
3560 struct mlx4_cmd_mailbox *inbox,
3561 struct mlx4_cmd_mailbox *outbox,
3562 struct mlx4_cmd_info *cmd)
3564 int cqn = vhcr->in_modifier;
3568 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3572 if (cq->com.from_state != RES_CQ_HW)
3575 if (vhcr->op_modifier == 0) {
3576 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3580 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3582 put_res(dev, slave, cqn, RES_CQ);
3587 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3589 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3590 int log_rq_stride = srqc->logstride & 7;
3591 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3593 if (log_srq_size + log_rq_stride + 4 < page_shift)
3596 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
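/*
 * Same pattern as the CQ/EQ helpers above: 1 << log_srq_size WQEs of
 * (presumably) 1 << (log_rq_stride + 4) bytes each give the SRQ size in
 * bytes, which is then converted to a page (i.e. MTT entry) count.
 */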
3599 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3600 struct mlx4_vhcr *vhcr,
3601 struct mlx4_cmd_mailbox *inbox,
3602 struct mlx4_cmd_mailbox *outbox,
3603 struct mlx4_cmd_info *cmd)
3606 int srqn = vhcr->in_modifier;
3607 struct res_mtt *mtt;
3608 struct res_srq *srq = NULL;
3609 struct mlx4_srq_context *srqc = inbox->buf;
3610 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3612 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3615 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3618 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3621 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3626 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3630 atomic_inc(&mtt->ref_count);
3632 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3633 res_end_move(dev, slave, RES_SRQ, srqn);
3637 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3639 res_abort_move(dev, slave, RES_SRQ, srqn);
3644 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3645 struct mlx4_vhcr *vhcr,
3646 struct mlx4_cmd_mailbox *inbox,
3647 struct mlx4_cmd_mailbox *outbox,
3648 struct mlx4_cmd_info *cmd)
3651 int srqn = vhcr->in_modifier;
3652 struct res_srq *srq = NULL;
3654 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3657 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3660 atomic_dec(&srq->mtt->ref_count);
3662 atomic_dec(&srq->cq->ref_count);
3663 res_end_move(dev, slave, RES_SRQ, srqn);
3668 res_abort_move(dev, slave, RES_SRQ, srqn);
3673 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3674 struct mlx4_vhcr *vhcr,
3675 struct mlx4_cmd_mailbox *inbox,
3676 struct mlx4_cmd_mailbox *outbox,
3677 struct mlx4_cmd_info *cmd)
3680 int srqn = vhcr->in_modifier;
3681 struct res_srq *srq;
3683 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3686 if (srq->com.from_state != RES_SRQ_HW) {
3690 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3692 put_res(dev, slave, srqn, RES_SRQ);
3696 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3697 struct mlx4_vhcr *vhcr,
3698 struct mlx4_cmd_mailbox *inbox,
3699 struct mlx4_cmd_mailbox *outbox,
3700 struct mlx4_cmd_info *cmd)
3703 int srqn = vhcr->in_modifier;
3704 struct res_srq *srq;
3706 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3710 if (srq->com.from_state != RES_SRQ_HW) {
3715 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3717 put_res(dev, slave, srqn, RES_SRQ);
3721 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3722 struct mlx4_vhcr *vhcr,
3723 struct mlx4_cmd_mailbox *inbox,
3724 struct mlx4_cmd_mailbox *outbox,
3725 struct mlx4_cmd_info *cmd)
3728 int qpn = vhcr->in_modifier & 0x7fffff;
3731 err = get_res(dev, slave, qpn, RES_QP, &qp);
3734 if (qp->com.from_state != RES_QP_HW) {
3739 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3741 put_res(dev, slave, qpn, RES_QP);
3745 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3746 struct mlx4_vhcr *vhcr,
3747 struct mlx4_cmd_mailbox *inbox,
3748 struct mlx4_cmd_mailbox *outbox,
3749 struct mlx4_cmd_info *cmd)
3751 struct mlx4_qp_context *context = inbox->buf + 8;
3752 adjust_proxy_tun_qkey(dev, vhcr, context);
3753 update_pkey_index(dev, slave, inbox);
3754 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3757 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3758 struct mlx4_qp_context *qpc,
3759 struct mlx4_cmd_mailbox *inbox)
3761 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3763 int port = mlx4_slave_convert_port(
3764 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3769 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3772 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3773 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3774 qpc->pri_path.sched_queue = pri_sched_queue;
3777 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3778 port = mlx4_slave_convert_port(
3779 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3783 qpc->alt_path.sched_queue =
3784 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3790 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3791 struct mlx4_qp_context *qpc,
3792 struct mlx4_cmd_mailbox *inbox)
3796 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3797 u8 sched = *(u8 *)(inbox->buf + 64);
3800 port = (sched >> 6 & 1) + 1;
3801 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3802 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3803 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3809 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3810 struct mlx4_vhcr *vhcr,
3811 struct mlx4_cmd_mailbox *inbox,
3812 struct mlx4_cmd_mailbox *outbox,
3813 struct mlx4_cmd_info *cmd)
3816 struct mlx4_qp_context *qpc = inbox->buf + 8;
3817 int qpn = vhcr->in_modifier & 0x7fffff;
3819 u8 orig_sched_queue;
3820 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3821 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3822 u8 orig_pri_path_fl = qpc->pri_path.fl;
3823 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3824 u8 orig_feup = qpc->pri_path.feup;
3826 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3829 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3833 if (roce_verify_mac(dev, slave, qpc, inbox))
3836 update_pkey_index(dev, slave, inbox);
3837 update_gid(dev, inbox, (u8)slave);
3838 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3839 orig_sched_queue = qpc->pri_path.sched_queue;
3841 err = get_res(dev, slave, qpn, RES_QP, &qp);
3844 if (qp->com.from_state != RES_QP_HW) {
3849 err = update_vport_qp_param(dev, inbox, slave, qpn);
3853 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3855 /* if no error, save sched queue value passed in by VF. This is
3856 * essentially the QOS value provided by the VF. This will be useful
3857 * if we allow dynamic changes from VST back to VGT
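 * (the saved sched_queue/vlan/feup values are what
 * mlx4_vf_immed_vlan_work_handler() restores into the QP context when
 * work->vlan_id == MLX4_VGT).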
3860 qp->sched_queue = orig_sched_queue;
3861 qp->vlan_control = orig_vlan_control;
3862 qp->fvl_rx = orig_fvl_rx;
3863 qp->pri_path_fl = orig_pri_path_fl;
3864 qp->vlan_index = orig_vlan_index;
3865 qp->feup = orig_feup;
3867 put_res(dev, slave, qpn, RES_QP);
3871 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3872 struct mlx4_vhcr *vhcr,
3873 struct mlx4_cmd_mailbox *inbox,
3874 struct mlx4_cmd_mailbox *outbox,
3875 struct mlx4_cmd_info *cmd)
3878 struct mlx4_qp_context *context = inbox->buf + 8;
3880 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3883 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3887 update_pkey_index(dev, slave, inbox);
3888 update_gid(dev, inbox, (u8)slave);
3889 adjust_proxy_tun_qkey(dev, vhcr, context);
3890 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3893 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3894 struct mlx4_vhcr *vhcr,
3895 struct mlx4_cmd_mailbox *inbox,
3896 struct mlx4_cmd_mailbox *outbox,
3897 struct mlx4_cmd_info *cmd)
3900 struct mlx4_qp_context *context = inbox->buf + 8;
3902 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3905 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3909 update_pkey_index(dev, slave, inbox);
3910 update_gid(dev, inbox, (u8)slave);
3911 adjust_proxy_tun_qkey(dev, vhcr, context);
3912 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3916 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3917 struct mlx4_vhcr *vhcr,
3918 struct mlx4_cmd_mailbox *inbox,
3919 struct mlx4_cmd_mailbox *outbox,
3920 struct mlx4_cmd_info *cmd)
3922 struct mlx4_qp_context *context = inbox->buf + 8;
3923 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3926 adjust_proxy_tun_qkey(dev, vhcr, context);
3927 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3930 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3931 struct mlx4_vhcr *vhcr,
3932 struct mlx4_cmd_mailbox *inbox,
3933 struct mlx4_cmd_mailbox *outbox,
3934 struct mlx4_cmd_info *cmd)
3937 struct mlx4_qp_context *context = inbox->buf + 8;
3939 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3942 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3946 adjust_proxy_tun_qkey(dev, vhcr, context);
3947 update_gid(dev, inbox, (u8)slave);
3948 update_pkey_index(dev, slave, inbox);
3949 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3952 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3953 struct mlx4_vhcr *vhcr,
3954 struct mlx4_cmd_mailbox *inbox,
3955 struct mlx4_cmd_mailbox *outbox,
3956 struct mlx4_cmd_info *cmd)
3959 struct mlx4_qp_context *context = inbox->buf + 8;
3961 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3964 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3968 adjust_proxy_tun_qkey(dev, vhcr, context);
3969 update_gid(dev, inbox, (u8)slave);
3970 update_pkey_index(dev, slave, inbox);
3971 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3974 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3975 struct mlx4_vhcr *vhcr,
3976 struct mlx4_cmd_mailbox *inbox,
3977 struct mlx4_cmd_mailbox *outbox,
3978 struct mlx4_cmd_info *cmd)
3981 int qpn = vhcr->in_modifier & 0x7fffff;
3984 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3987 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3991 atomic_dec(&qp->mtt->ref_count);
3992 atomic_dec(&qp->rcq->ref_count);
3993 atomic_dec(&qp->scq->ref_count);
3995 atomic_dec(&qp->srq->ref_count);
3996 res_end_move(dev, slave, RES_QP, qpn);
4000 res_abort_move(dev, slave, RES_QP, qpn);
4005 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4006 struct res_qp *rqp, u8 *gid)
4008 struct res_gid *res;
4010 list_for_each_entry(res, &rqp->mcg_list, list) {
4011 if (!memcmp(res->gid, gid, 16))
4017 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4018 u8 *gid, enum mlx4_protocol prot,
4019 enum mlx4_steer_type steer, u64 reg_id)
4021 struct res_gid *res;
4024 res = kzalloc(sizeof(*res), GFP_KERNEL);
4028 spin_lock_irq(&rqp->mcg_spl);
4029 if (find_gid(dev, slave, rqp, gid)) {
4033 memcpy(res->gid, gid, 16);
4036 res->reg_id = reg_id;
4037 list_add_tail(&res->list, &rqp->mcg_list);
4040 spin_unlock_irq(&rqp->mcg_spl);
4045 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4046 u8 *gid, enum mlx4_protocol prot,
4047 enum mlx4_steer_type steer, u64 *reg_id)
4049 struct res_gid *res;
4052 spin_lock_irq(&rqp->mcg_spl);
4053 res = find_gid(dev, slave, rqp, gid);
4054 if (!res || res->prot != prot || res->steer != steer)
4057 *reg_id = res->reg_id;
4058 list_del(&res->list);
4062 spin_unlock_irq(&rqp->mcg_spl);
4067 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4068 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4069 enum mlx4_steer_type type, u64 *reg_id)
4071 switch (dev->caps.steering_mode) {
4072 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4073 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4076 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4077 block_loopback, prot,
4080 case MLX4_STEERING_MODE_B0:
4081 if (prot == MLX4_PROT_ETH) {
4082 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4087 return mlx4_qp_attach_common(dev, qp, gid,
4088 block_loopback, prot, type);
4094 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4095 u8 gid[16], enum mlx4_protocol prot,
4096 enum mlx4_steer_type type, u64 reg_id)
4098 switch (dev->caps.steering_mode) {
4099 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4100 return mlx4_flow_detach(dev, reg_id);
4101 case MLX4_STEERING_MODE_B0:
4102 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4108 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4109 u8 *gid, enum mlx4_protocol prot)
4113 if (prot != MLX4_PROT_ETH)
4116 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4117 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4118 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4127 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4128 struct mlx4_vhcr *vhcr,
4129 struct mlx4_cmd_mailbox *inbox,
4130 struct mlx4_cmd_mailbox *outbox,
4131 struct mlx4_cmd_info *cmd)
4133 struct mlx4_qp qp; /* dummy for calling attach/detach */
4134 u8 *gid = inbox->buf;
4135 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4140 int attach = vhcr->op_modifier;
4141 int block_loopback = vhcr->in_modifier >> 31;
4142 u8 steer_type_mask = 2;
4143 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4145 qpn = vhcr->in_modifier & 0xffffff;
4146 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4152 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4155 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4158 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4162 err = mlx4_adjust_port(dev, slave, gid, prot);
4166 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4170 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4172 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4175 put_res(dev, slave, qpn, RES_QP);
4179 qp_detach(dev, &qp, gid, prot, type, reg_id);
4181 put_res(dev, slave, qpn, RES_QP);
4186 * MAC validation for Flow Steering rules.
4187 * A VF can attach rules only with a MAC address that is assigned to it.
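 * For example, a rule whose eth.dst_mac is neither multicast nor broadcast
 * and does not appear in the slave's registered MAC list is rejected.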
4189 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4190 struct list_head *rlist)
4192 struct mac_res *res, *tmp;
4195 /* make sure it isn't a multicast or broadcast MAC */
4196 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4197 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4198 list_for_each_entry_safe(res, tmp, rlist, list) {
4199 be_mac = cpu_to_be64(res->mac << 16);
4200 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4203 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4204 eth_header->eth.dst_mac, slave);
4211 * If the eth header is missing, append an eth header with a MAC address
4212 * assigned to the VF.
4214 static int add_eth_header(struct mlx4_dev *dev, int slave,
4215 struct mlx4_cmd_mailbox *inbox,
4216 struct list_head *rlist, int header_id)
4218 struct mac_res *res, *tmp;
4220 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4221 struct mlx4_net_trans_rule_hw_eth *eth_header;
4222 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4223 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4225 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4227 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4229 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4231 /* Make room in the inbox for the eth header */
4232 switch (header_id) {
4233 case MLX4_NET_TRANS_RULE_ID_IPV4:
4235 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4236 memmove(ip_header, eth_header,
4237 sizeof(*ip_header) + sizeof(*l4_header));
4239 case MLX4_NET_TRANS_RULE_ID_TCP:
4240 case MLX4_NET_TRANS_RULE_ID_UDP:
4241 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4243 memmove(l4_header, eth_header, sizeof(*l4_header));
4248 list_for_each_entry_safe(res, tmp, rlist, list) {
4249 if (port == res->port) {
4250 be_mac = cpu_to_be64(res->mac << 16);
4255 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4260 memset(eth_header, 0, sizeof(*eth_header));
4261 eth_header->size = sizeof(*eth_header) >> 2;
4262 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4263 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4264 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4270 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4271 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4272 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4273 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4274 struct mlx4_vhcr *vhcr,
4275 struct mlx4_cmd_mailbox *inbox,
4276 struct mlx4_cmd_mailbox *outbox,
4277 struct mlx4_cmd_info *cmd_info)
4280 u32 qpn = vhcr->in_modifier & 0xffffff;
4284 u64 pri_addr_path_mask;
4285 struct mlx4_update_qp_context *cmd;
4288 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4290 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4291 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4292 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4295 if ((pri_addr_path_mask &
4296 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4297 !(dev->caps.flags2 &
4298 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4299 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4304 /* Just change the smac for the QP */
4305 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4307 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4311 port = (rqp->sched_queue >> 6 & 1) + 1;
4313 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4314 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4315 err = mac_find_smac_ix_in_slave(dev, slave, port,
4319 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4325 err = mlx4_cmd(dev, inbox->dma,
4326 vhcr->in_modifier, 0,
4327 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4330 mlx4_err(dev, "Failed to update QP 0x%x, command failed\n", qpn);
4335 put_res(dev, slave, qpn, RES_QP);
4339 static u32 qp_attach_mbox_size(void *mbox)
4341 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4342 struct _rule_hw *rule_header;
4344 rule_header = (struct _rule_hw *)(mbox + size);
4346 while (rule_header->size) {
4347 size += rule_header->size * sizeof(u32);
4353 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4355 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4356 struct mlx4_vhcr *vhcr,
4357 struct mlx4_cmd_mailbox *inbox,
4358 struct mlx4_cmd_mailbox *outbox,
4359 struct mlx4_cmd_info *cmd)
4362 struct mlx4_priv *priv = mlx4_priv(dev);
4363 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4364 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4368 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4369 struct _rule_hw *rule_header;
4371 struct res_fs_rule *rrule;
4374 if (dev->caps.steering_mode !=
4375 MLX4_STEERING_MODE_DEVICE_MANAGED)
4378 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4379 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4383 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4384 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4386 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4389 rule_header = (struct _rule_hw *)(ctrl + 1);
4390 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4392 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4393 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4395 switch (header_id) {
4396 case MLX4_NET_TRANS_RULE_ID_ETH:
4397 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4402 case MLX4_NET_TRANS_RULE_ID_IB:
4404 case MLX4_NET_TRANS_RULE_ID_IPV4:
4405 case MLX4_NET_TRANS_RULE_ID_TCP:
4406 case MLX4_NET_TRANS_RULE_ID_UDP:
4407 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4408 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4412 vhcr->in_modifier +=
4413 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4416 pr_err("Corrupted mailbox\n");
4421 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4422 vhcr->in_modifier, 0,
4423 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4429 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4431 mlx4_err(dev, "Failed to add flow steering resources\n");
4435 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4439 mbox_size = qp_attach_mbox_size(inbox->buf);
4440 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4441 if (!rrule->mirr_mbox) {
4445 rrule->mirr_mbox_size = mbox_size;
4446 rrule->mirr_rule_id = 0;
4447 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4449 /* flip the port for the mirror copy */
4450 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4451 if (ctrl->port == 1)
4456 if (mlx4_is_bonded(dev))
4457 mlx4_do_mirror_rule(dev, rrule);
4459 atomic_inc(&rqp->ref_count);
4462 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4464 /* detach rule on error */
4466 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4467 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4470 put_res(dev, slave, qpn, RES_QP);
4474 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4478 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4480 mlx4_err(dev, "Failed to remove flow steering resources\n");
4484 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4485 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4489 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4490 struct mlx4_vhcr *vhcr,
4491 struct mlx4_cmd_mailbox *inbox,
4492 struct mlx4_cmd_mailbox *outbox,
4493 struct mlx4_cmd_info *cmd)
4497 struct res_fs_rule *rrule;
4501 if (dev->caps.steering_mode !=
4502 MLX4_STEERING_MODE_DEVICE_MANAGED)
4505 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4509 if (!rrule->mirr_mbox) {
4510 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4511 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4514 mirr_reg_id = rrule->mirr_rule_id;
4515 kfree(rrule->mirr_mbox);
4518 /* Release the rule from busy state before removal */
4519 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4520 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4524 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4525 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4527 mlx4_err(dev, "Failed to get resource of mirror rule\n");
4529 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4530 mlx4_undo_mirror_rule(dev, rrule);
4533 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4535 mlx4_err(dev, "Failed to remove flow steering resources\n");
4539 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4540 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4543 atomic_dec(&rqp->ref_count);
4545 put_res(dev, slave, qpn, RES_QP);
4550 BUSY_MAX_RETRIES = 10
4553 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4554 struct mlx4_vhcr *vhcr,
4555 struct mlx4_cmd_mailbox *inbox,
4556 struct mlx4_cmd_mailbox *outbox,
4557 struct mlx4_cmd_info *cmd)
4560 int index = vhcr->in_modifier & 0xffff;
4562 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4566 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4567 put_res(dev, slave, index, RES_COUNTER);
4571 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4573 struct res_gid *rgid;
4574 struct res_gid *tmp;
4575 struct mlx4_qp qp; /* dummy for calling attach/detach */
4577 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4578 switch (dev->caps.steering_mode) {
4579 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4580 mlx4_flow_detach(dev, rgid->reg_id);
4582 case MLX4_STEERING_MODE_B0:
4583 qp.qpn = rqp->local_qpn;
4584 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4585 rgid->prot, rgid->steer);
4588 list_del(&rgid->list);
4593 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4594 enum mlx4_resource type, int print)
4596 struct mlx4_priv *priv = mlx4_priv(dev);
4597 struct mlx4_resource_tracker *tracker =
4598 &priv->mfunc.master.res_tracker;
4599 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4600 struct res_common *r;
4601 struct res_common *tmp;
4605 spin_lock_irq(mlx4_tlock(dev));
4606 list_for_each_entry_safe(r, tmp, rlist, list) {
4607 if (r->owner == slave) {
4609 if (r->state == RES_ANY_BUSY) {
4612 "%s id 0x%llx is busy\n",
4617 r->from_state = r->state;
4618 r->state = RES_ANY_BUSY;
4624 spin_unlock_irq(mlx4_tlock(dev));
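/*
 * move_all_busy() below retries _move_all_busy() until every resource of the
 * given type owned by the slave has been marked busy, giving up after roughly
 * five seconds and doing one final pass with printing enabled so that the
 * stuck resources are logged.
 */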
4629 static int move_all_busy(struct mlx4_dev *dev, int slave,
4630 enum mlx4_resource type)
4632 unsigned long begin;
4637 busy = _move_all_busy(dev, slave, type, 0);
4638 if (time_after(jiffies, begin + 5 * HZ))
4645 busy = _move_all_busy(dev, slave, type, 1);
4649 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4651 struct mlx4_priv *priv = mlx4_priv(dev);
4652 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4653 struct list_head *qp_list =
4654 &tracker->slave_list[slave].res_list[RES_QP];
4662 err = move_all_busy(dev, slave, RES_QP);
4664 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4667 spin_lock_irq(mlx4_tlock(dev));
4668 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4669 spin_unlock_irq(mlx4_tlock(dev));
4670 if (qp->com.owner == slave) {
4671 qpn = qp->com.res_id;
4672 detach_qp(dev, slave, qp);
4673 state = qp->com.from_state;
4674 while (state != 0) {
4676 case RES_QP_RESERVED:
4677 spin_lock_irq(mlx4_tlock(dev));
4678 rb_erase(&qp->com.node,
4679 &tracker->res_tree[RES_QP]);
4680 list_del(&qp->com.list);
4681 spin_unlock_irq(mlx4_tlock(dev));
4682 if (!valid_reserved(dev, slave, qpn)) {
4683 __mlx4_qp_release_range(dev, qpn, 1);
4684 mlx4_release_resource(dev, slave,
4691 if (!valid_reserved(dev, slave, qpn))
4692 __mlx4_qp_free_icm(dev, qpn);
4693 state = RES_QP_RESERVED;
4697 err = mlx4_cmd(dev, in_param,
4700 MLX4_CMD_TIME_CLASS_A,
4703 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4704 slave, qp->local_qpn);
4705 atomic_dec(&qp->rcq->ref_count);
4706 atomic_dec(&qp->scq->ref_count);
4707 atomic_dec(&qp->mtt->ref_count);
4709 atomic_dec(&qp->srq->ref_count);
4710 state = RES_QP_MAPPED;
4717 spin_lock_irq(mlx4_tlock(dev));
4719 spin_unlock_irq(mlx4_tlock(dev));
4722 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4724 struct mlx4_priv *priv = mlx4_priv(dev);
4725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4726 struct list_head *srq_list =
4727 &tracker->slave_list[slave].res_list[RES_SRQ];
4728 struct res_srq *srq;
4729 struct res_srq *tmp;
4736 err = move_all_busy(dev, slave, RES_SRQ);
4738 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4741 spin_lock_irq(mlx4_tlock(dev));
4742 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4743 spin_unlock_irq(mlx4_tlock(dev));
4744 if (srq->com.owner == slave) {
4745 srqn = srq->com.res_id;
4746 state = srq->com.from_state;
4747 while (state != 0) {
4749 case RES_SRQ_ALLOCATED:
4750 __mlx4_srq_free_icm(dev, srqn);
4751 spin_lock_irq(mlx4_tlock(dev));
4752 rb_erase(&srq->com.node,
4753 &tracker->res_tree[RES_SRQ]);
4754 list_del(&srq->com.list);
4755 spin_unlock_irq(mlx4_tlock(dev));
4756 mlx4_release_resource(dev, slave,
4764 err = mlx4_cmd(dev, in_param, srqn, 1,
4766 MLX4_CMD_TIME_CLASS_A,
4769 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4772 atomic_dec(&srq->mtt->ref_count);
4774 atomic_dec(&srq->cq->ref_count);
4775 state = RES_SRQ_ALLOCATED;
4783 spin_lock_irq(mlx4_tlock(dev));
4785 spin_unlock_irq(mlx4_tlock(dev));
4788 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4790 struct mlx4_priv *priv = mlx4_priv(dev);
4791 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4792 struct list_head *cq_list =
4793 &tracker->slave_list[slave].res_list[RES_CQ];
4802 err = move_all_busy(dev, slave, RES_CQ);
4804 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4807 spin_lock_irq(mlx4_tlock(dev));
4808 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4809 spin_unlock_irq(mlx4_tlock(dev));
4810 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4811 cqn = cq->com.res_id;
4812 state = cq->com.from_state;
4813 while (state != 0) {
4815 case RES_CQ_ALLOCATED:
4816 __mlx4_cq_free_icm(dev, cqn);
4817 spin_lock_irq(mlx4_tlock(dev));
4818 rb_erase(&cq->com.node,
4819 &tracker->res_tree[RES_CQ]);
4820 list_del(&cq->com.list);
4821 spin_unlock_irq(mlx4_tlock(dev));
4822 mlx4_release_resource(dev, slave,
4830 err = mlx4_cmd(dev, in_param, cqn, 1,
4832 MLX4_CMD_TIME_CLASS_A,
4835 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4837 atomic_dec(&cq->mtt->ref_count);
4838 state = RES_CQ_ALLOCATED;
4846 spin_lock_irq(mlx4_tlock(dev));
4848 spin_unlock_irq(mlx4_tlock(dev));
4851 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4853 struct mlx4_priv *priv = mlx4_priv(dev);
4854 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4855 struct list_head *mpt_list =
4856 &tracker->slave_list[slave].res_list[RES_MPT];
4857 struct res_mpt *mpt;
4858 struct res_mpt *tmp;
4865 err = move_all_busy(dev, slave, RES_MPT);
4867 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4870 spin_lock_irq(mlx4_tlock(dev));
4871 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4872 spin_unlock_irq(mlx4_tlock(dev));
4873 if (mpt->com.owner == slave) {
4874 mptn = mpt->com.res_id;
4875 state = mpt->com.from_state;
4876 while (state != 0) {
4878 case RES_MPT_RESERVED:
4879 __mlx4_mpt_release(dev, mpt->key);
4880 spin_lock_irq(mlx4_tlock(dev));
4881 rb_erase(&mpt->com.node,
4882 &tracker->res_tree[RES_MPT]);
4883 list_del(&mpt->com.list);
4884 spin_unlock_irq(mlx4_tlock(dev));
4885 mlx4_release_resource(dev, slave,
4891 case RES_MPT_MAPPED:
4892 __mlx4_mpt_free_icm(dev, mpt->key);
4893 state = RES_MPT_RESERVED;
4898 err = mlx4_cmd(dev, in_param, mptn, 0,
4900 MLX4_CMD_TIME_CLASS_A,
4903 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4906 atomic_dec(&mpt->mtt->ref_count);
4907 state = RES_MPT_MAPPED;
4914 spin_lock_irq(mlx4_tlock(dev));
4916 spin_unlock_irq(mlx4_tlock(dev));
4919 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4921 struct mlx4_priv *priv = mlx4_priv(dev);
4922 struct mlx4_resource_tracker *tracker =
4923 &priv->mfunc.master.res_tracker;
4924 struct list_head *mtt_list =
4925 &tracker->slave_list[slave].res_list[RES_MTT];
4926 struct res_mtt *mtt;
4927 struct res_mtt *tmp;
4933 err = move_all_busy(dev, slave, RES_MTT);
4935 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4938 spin_lock_irq(mlx4_tlock(dev));
4939 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4940 spin_unlock_irq(mlx4_tlock(dev));
4941 if (mtt->com.owner == slave) {
4942 base = mtt->com.res_id;
4943 state = mtt->com.from_state;
4944 while (state != 0) {
4946 case RES_MTT_ALLOCATED:
4947 __mlx4_free_mtt_range(dev, base,
4949 spin_lock_irq(mlx4_tlock(dev));
4950 rb_erase(&mtt->com.node,
4951 &tracker->res_tree[RES_MTT]);
4952 list_del(&mtt->com.list);
4953 spin_unlock_irq(mlx4_tlock(dev));
4954 mlx4_release_resource(dev, slave, RES_MTT,
4955 1 << mtt->order, 0);
4965 spin_lock_irq(mlx4_tlock(dev));
4967 spin_unlock_irq(mlx4_tlock(dev));
4970 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4972 struct mlx4_cmd_mailbox *mailbox;
4974 struct res_fs_rule *mirr_rule;
4977 mailbox = mlx4_alloc_cmd_mailbox(dev);
4978 if (IS_ERR(mailbox))
4979 return PTR_ERR(mailbox);
4981 if (!fs_rule->mirr_mbox) {
4982 mlx4_err(dev, "rule mirroring mailbox is null\n");
4985 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4986 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4987 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4989 mlx4_free_cmd_mailbox(dev, mailbox);
4994 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4998 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
5002 fs_rule->mirr_rule_id = reg_id;
5003 mirr_rule->mirr_rule_id = 0;
5004 mirr_rule->mirr_mbox_size = 0;
5005 mirr_rule->mirr_mbox = NULL;
5006 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5010 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5012 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5013 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
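/*
 * Rule mirroring for HA (bonded) mode: mlx4_do_mirror_rule() above replays
 * the attach mailbox that was saved (with the port flipped) when the original
 * rule was attached, tracks the duplicate in the resource tracker and records
 * its reg_id in mirr_rule_id; mlx4_undo_mirror_rule() removes such a
 * duplicate again. mlx4_mirror_fs_rules() below walks all rules and applies
 * or un-applies the mirroring as the bonding state changes.
 */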
5018 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5020 struct mlx4_priv *priv = mlx4_priv(dev);
5021 struct mlx4_resource_tracker *tracker =
5022 &priv->mfunc.master.res_tracker;
5023 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5025 struct res_fs_rule *fs_rule;
5027 LIST_HEAD(mirr_list);
5029 for (p = rb_first(root); p; p = rb_next(p)) {
5030 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5031 if ((bond && fs_rule->mirr_mbox_size) ||
5032 (!bond && !fs_rule->mirr_mbox_size))
5033 list_add_tail(&fs_rule->mirr_list, &mirr_list);
5036 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5038 err += mlx4_do_mirror_rule(dev, fs_rule);
5040 err += mlx4_undo_mirror_rule(dev, fs_rule);
5045 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5047 return mlx4_mirror_fs_rules(dev, true);
5050 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5052 return mlx4_mirror_fs_rules(dev, false);
5055 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5057 struct mlx4_priv *priv = mlx4_priv(dev);
5058 struct mlx4_resource_tracker *tracker =
5059 &priv->mfunc.master.res_tracker;
5060 struct list_head *fs_rule_list =
5061 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5062 struct res_fs_rule *fs_rule;
5063 struct res_fs_rule *tmp;
5068 err = move_all_busy(dev, slave, RES_FS_RULE);
5070 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5073 spin_lock_irq(mlx4_tlock(dev));
5074 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5075 spin_unlock_irq(mlx4_tlock(dev));
5076 if (fs_rule->com.owner == slave) {
5077 base = fs_rule->com.res_id;
5078 state = fs_rule->com.from_state;
5079 while (state != 0) {
5081 case RES_FS_RULE_ALLOCATED:
5083 err = mlx4_cmd(dev, base, 0, 0,
5084 MLX4_QP_FLOW_STEERING_DETACH,
5085 MLX4_CMD_TIME_CLASS_A,
5088 spin_lock_irq(mlx4_tlock(dev));
5089 rb_erase(&fs_rule->com.node,
5090 &tracker->res_tree[RES_FS_RULE]);
5091 list_del(&fs_rule->com.list);
5092 spin_unlock_irq(mlx4_tlock(dev));
5093 kfree(fs_rule->mirr_mbox);
5103 spin_lock_irq(mlx4_tlock(dev));
5105 spin_unlock_irq(mlx4_tlock(dev));
5108 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5110 struct mlx4_priv *priv = mlx4_priv(dev);
5111 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5112 struct list_head *eq_list =
5113 &tracker->slave_list[slave].res_list[RES_EQ];
5121 err = move_all_busy(dev, slave, RES_EQ);
5123 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5126 spin_lock_irq(mlx4_tlock(dev));
5127 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5128 spin_unlock_irq(mlx4_tlock(dev));
5129 if (eq->com.owner == slave) {
5130 eqn = eq->com.res_id;
5131 state = eq->com.from_state;
5132 while (state != 0) {
5134 case RES_EQ_RESERVED:
5135 spin_lock_irq(mlx4_tlock(dev));
5136 rb_erase(&eq->com.node,
5137 &tracker->res_tree[RES_EQ]);
5138 list_del(&eq->com.list);
5139 spin_unlock_irq(mlx4_tlock(dev));
5145 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5146 1, MLX4_CMD_HW2SW_EQ,
5147 MLX4_CMD_TIME_CLASS_A,
5150 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
5151 slave, eqn & 0x3ff);
5152 atomic_dec(&eq->mtt->ref_count);
5153 state = RES_EQ_RESERVED;
5161 spin_lock_irq(mlx4_tlock(dev));
5163 spin_unlock_irq(mlx4_tlock(dev));
5166 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5168 struct mlx4_priv *priv = mlx4_priv(dev);
5169 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5170 struct list_head *counter_list =
5171 &tracker->slave_list[slave].res_list[RES_COUNTER];
5172 struct res_counter *counter;
5173 struct res_counter *tmp;
5175 int *counters_arr = NULL;
5178 err = move_all_busy(dev, slave, RES_COUNTER);
5180 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5183 counters_arr = kmalloc_array(dev->caps.max_counters,
5184 sizeof(*counters_arr), GFP_KERNEL);
5191 spin_lock_irq(mlx4_tlock(dev));
5192 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5193 if (counter->com.owner == slave) {
5194 counters_arr[i++] = counter->com.res_id;
5195 rb_erase(&counter->com.node,
5196 &tracker->res_tree[RES_COUNTER]);
5197 list_del(&counter->com.list);
5201 spin_unlock_irq(mlx4_tlock(dev));
5204 __mlx4_counter_free(dev, counters_arr[j++]);
5205 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5209 kfree(counters_arr);
5212 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5214 struct mlx4_priv *priv = mlx4_priv(dev);
5215 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5216 struct list_head *xrcdn_list =
5217 &tracker->slave_list[slave].res_list[RES_XRCD];
5218 struct res_xrcdn *xrcd;
5219 struct res_xrcdn *tmp;
5223 err = move_all_busy(dev, slave, RES_XRCD);
5225 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5228 spin_lock_irq(mlx4_tlock(dev));
5229 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5230 if (xrcd->com.owner == slave) {
5231 xrcdn = xrcd->com.res_id;
5232 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5233 list_del(&xrcd->com.list);
5235 __mlx4_xrcd_free(dev, xrcdn);
5238 spin_unlock_irq(mlx4_tlock(dev));
5241 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5243 struct mlx4_priv *priv = mlx4_priv(dev);
5244 mlx4_reset_roce_gids(dev, slave);
5245 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5246 rem_slave_vlans(dev, slave);
5247 rem_slave_macs(dev, slave);
5248 rem_slave_fs_rule(dev, slave);
5249 rem_slave_qps(dev, slave);
5250 rem_slave_srqs(dev, slave);
5251 rem_slave_cqs(dev, slave);
5252 rem_slave_mrs(dev, slave);
5253 rem_slave_eqs(dev, slave);
5254 rem_slave_mtts(dev, slave);
5255 rem_slave_counters(dev, slave);
5256 rem_slave_xrcdns(dev, slave);
5257 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5260 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5261 struct mlx4_vf_immed_vlan_work *work)
5263 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5264 ctx->qp_context.qos_vport = work->qos_vport;
5267 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5269 struct mlx4_vf_immed_vlan_work *work =
5270 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5271 struct mlx4_cmd_mailbox *mailbox;
5272 struct mlx4_update_qp_context *upd_context;
5273 struct mlx4_dev *dev = &work->priv->dev;
5274 struct mlx4_resource_tracker *tracker =
5275 &work->priv->mfunc.master.res_tracker;
5276 struct list_head *qp_list =
5277 &tracker->slave_list[work->slave].res_list[RES_QP];
5280 u64 qp_path_mask_vlan_ctrl =
5281 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5282 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5283 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5284 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5285 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5286 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5288 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5289 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5290 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5291 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5292 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5293 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5294 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5295 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5298 int port, errors = 0;
5301 if (mlx4_is_slave(dev)) {
5302 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5307 mailbox = mlx4_alloc_cmd_mailbox(dev);
5308 if (IS_ERR(mailbox))
5310 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5311 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5312 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5313 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5314 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5315 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5316 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5317 else if (!work->vlan_id)
5318 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5319 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5320 else if (work->vlan_proto == htons(ETH_P_8021AD))
5321 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5322 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5323 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5324 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5325 else /* vst 802.1Q */
5326 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5327 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5328 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5330 upd_context = mailbox->buf;
5331 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5333 spin_lock_irq(mlx4_tlock(dev));
5334 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5335 spin_unlock_irq(mlx4_tlock(dev));
5336 if (qp->com.owner == work->slave) {
5337 if (qp->com.from_state != RES_QP_HW ||
5338 !qp->sched_queue || /* no INIT2RTR trans yet */
5339 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5340 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5341 spin_lock_irq(mlx4_tlock(dev));
5344 port = (qp->sched_queue >> 6 & 1) + 1;
5345 if (port != work->port) {
5346 spin_lock_irq(mlx4_tlock(dev));
5349 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5350 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5352 upd_context->primary_addr_path_mask =
5353 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5354 if (work->vlan_id == MLX4_VGT) {
5355 upd_context->qp_context.param3 = qp->param3;
5356 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5357 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5358 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5359 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5360 upd_context->qp_context.pri_path.feup = qp->feup;
5361 upd_context->qp_context.pri_path.sched_queue =
5364 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5365 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5366 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5367 upd_context->qp_context.pri_path.fvl_rx =
5368 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5369 upd_context->qp_context.pri_path.fl =
5370 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5371 if (work->vlan_proto == htons(ETH_P_8021AD))
5372 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5374 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5375 upd_context->qp_context.pri_path.feup =
5376 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5377 upd_context->qp_context.pri_path.sched_queue =
5378 qp->sched_queue & 0xC7;
5379 upd_context->qp_context.pri_path.sched_queue |=
5380 ((work->qos & 0x7) << 3);
5382 if (dev->caps.flags2 &
5383 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5384 update_qos_vpp(upd_context, work);
5387 err = mlx4_cmd(dev, mailbox->dma,
5388 qp->local_qpn & 0xffffff,
5389 0, MLX4_CMD_UPDATE_QP,
5390 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5392 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5393 work->slave, port, qp->local_qpn, err);
5397 spin_lock_irq(mlx4_tlock(dev));
5399 spin_unlock_irq(mlx4_tlock(dev));
5400 mlx4_free_cmd_mailbox(dev, mailbox);
5403 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5404 errors, work->slave, work->port);
5406 /* unregister previous vlan_id if needed and we had no errors
5407 * while updating the QPs
5409 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5410 NO_INDX != work->orig_vlan_ix)
5411 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5412 work->orig_vlan_id);