/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#define MLX4_MAC_VALID		(1ull << 63)
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
};
static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
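
/*
 * Illustrative sketch (not part of the driver): how the rb-tree tracker
 * above is typically driven.  res_tracker_insert() returns -EEXIST for a
 * duplicate res_id, and res_tracker_lookup() returns the embedded
 * res_common (or NULL) keyed by the 64-bit resource id:
 *
 *	struct rb_root root = RB_ROOT;
 *	struct res_common *res = kzalloc(sizeof(*res), GFP_KERNEL);
 *
 *	if (res) {
 *		res->res_id = 42;
 *		if (!res_tracker_insert(&root, res)) {
 *			struct res_common *found =
 *				res_tracker_lookup(&root, 42);
 *			// found == res; a second insert of id 42
 *			// would return -EEXIST
 *		}
 *	}
 *
 * In the driver itself the callers hold mlx4_tlock(dev) around these
 * operations; the sketch omits that locking.
 */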
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;

	if (slave > dev->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave])
		goto out;

	if (allocated + count <= guaranteed) {
		err = 0;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		if (free - from_free > reserved)
			err = 0;
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
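
/*
 * Worked example (made-up numbers, for illustration only): a slave with
 * quota = 10, guaranteed = 4 and allocated = 3 asks for count = 5.
 * allocated + count = 8 <= quota, so the quota check passes; only
 * guaranteed - allocated = 1 unit can come from the guaranteed portion,
 * so from_free = 5 - 1 = 4 must come from the shared pool, and the
 * request is granted only if free - 4 still exceeds the reserved amount
 * (the sum of the guarantees promised to everyone else).
 */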
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];

	if (slave > dev->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);
	if (port > 0) {
		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
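
/*
 * Numeric example (hypothetical device, for illustration): with
 * dev->num_vfs = 3 and num_instances = 1000, every function gets
 * guaranteed = 1000 / (2 * 4) = 125 and quota = 500 + 125 = 625.
 * Half of the pool is thus handed out as hard guarantees, while each
 * function may additionally compete for up to half of the total.
 */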
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];

		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->num_vfs + 1) * sizeof(int),
						       GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);

			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;

					/* Calculate the max vfs per port for both ports. */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_VLAN_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] = 0;
				if (t == mlx4_master_func_num(dev))
					res_alloc->res_free = res_alloc->quota[t];
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
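
/*
 * Example of the remapping above (illustrative values): a slave always
 * addresses its GIDs starting at index 0.  On an Ethernet port where
 * mlx4_get_base_gid_ix(dev, slave, port) happens to return 16 for this
 * slave, a guest-visible mgid_index of 2 becomes physical index 18
 * (masked to 7 bits); on an IB port the index is simply forced to the
 * slave number instead.
 */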
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	int port;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over VLANs
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force VLAN stripping by clearing the VSD bit */
		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
		} else { /* priority tagged */
			qpc->pri_path.vlan_control =
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
	return 0;
}
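
/*
 * Summary of the VST policy composed above (interpretation, for
 * illustration): with a forced VLAN the TX flags block the VF's own
 * tagged traffic (the HW inserts the VST tag itself) while the RX flags
 * drop untagged and priority-tagged frames, and an administratively
 * downed VF link blocks every direction.  The vlan_control byte is just
 * the OR of the MLX4_VLAN_CTRL_* flags chosen in the branch taken.
 */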
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
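
/*
 * Typical calling pattern (sketch, not a real call site): get_res()
 * parks the entry in RES_ANY_BUSY so no concurrent flow can move it,
 * and put_res() restores the state saved in from_state:
 *
 *	struct res_mpt *mpt;
 *	int err = get_res(dev, slave, id, RES_MPT, &mpt);
 *
 *	if (err)
 *		return err;
 *	// ... use mpt while it is protected from concurrent moves ...
 *	put_res(dev, slave, id, RES_MPT);
 */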
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	/* i is an index into res_arr here, not a resource id */
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found; update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* unregister the MAC once for each reference the slave took */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found; update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* unregister the vlan once for each reference the slave took */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
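
/*
 * Worked example for qp_get_mtt_size() (made-up context values): with
 * log_sq_size = 6, log_sq_stride = 2, 4K pages (page_shift = 12) and no
 * SRQ/RSS/XRC, sq_size = 1 << (6 + 2 + 4) = 4096 bytes; if rq_size works
 * out to another 4096 bytes, total_mem = 8192 and, with page_offset = 0,
 * roundup_pow_of_two(8192 >> 12) = 2 MTT entries are needed.
 */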
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		qp->srq = srq;
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
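
/*
 * Example for the two helpers above (illustrative numbers): EQ and CQ
 * entries are 32 bytes, hence the "+ 5".  A CQ with log_cq_size = 10
 * spans 1 << 15 bytes; with 4K pages (page_shift = 12) that is
 * 1 << (10 + 5 - 12) = 8 MTT entries, while any queue that fits within
 * one page needs a single entry.
 */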
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness
	 */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
2924 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2925 struct mlx4_vhcr *vhcr,
2926 struct mlx4_cmd_mailbox *inbox,
2927 struct mlx4_cmd_mailbox *outbox,
2928 struct mlx4_cmd_info *cmd)
2930 int eqn = vhcr->in_modifier;
2931 int res_id = eqn | (slave << 8);
2935 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2939 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2943 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2947 atomic_dec(&eq->mtt->ref_count);
2948 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2949 res_end_move(dev, slave, RES_EQ, res_id);
2950 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2955 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2957 res_abort_move(dev, slave, RES_EQ, res_id);
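/*
 * Inject an event into a slave's event queue. The EQE is generated only
 * if the slave has registered an EQN for this event type; the 28-byte
 * EQE payload is copied into a mailbox and MLX4_CMD_GEN_EQE is issued
 * with the slave number in the low byte of the in_modifier and the EQN
 * in bits 16..23. The EQ must be in RES_EQ_HW state, and gen_eqe_mutex
 * serializes injections per slave.
 */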
2962 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2964 struct mlx4_priv *priv = mlx4_priv(dev);
2965 struct mlx4_slave_event_eq_info *event_eq;
2966 struct mlx4_cmd_mailbox *mailbox;
2967 u32 in_modifier = 0;
2972 if (!priv->mfunc.master.slave_state)
2975 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2977 /* Create the event only if the slave is registered */
2978 if (event_eq->eqn < 0)
2981 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2982 res_id = (slave << 8) | event_eq->eqn;
2983 err = get_res(dev, slave, res_id, RES_EQ, &req);
2987 if (req->com.from_state != RES_EQ_HW) {
2992 mailbox = mlx4_alloc_cmd_mailbox(dev);
2993 if (IS_ERR(mailbox)) {
2994 err = PTR_ERR(mailbox);
2998 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3000 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3003 memcpy(mailbox->buf, (u8 *) eqe, 28);
3005 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3007 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3008 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3011 put_res(dev, slave, res_id, RES_EQ);
3012 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3013 mlx4_free_cmd_mailbox(dev, mailbox);
3017 put_res(dev, slave, res_id, RES_EQ);
3020 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3024 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3025 struct mlx4_vhcr *vhcr,
3026 struct mlx4_cmd_mailbox *inbox,
3027 struct mlx4_cmd_mailbox *outbox,
3028 struct mlx4_cmd_info *cmd)
3030 int eqn = vhcr->in_modifier;
3031 int res_id = eqn | (slave << 8);
3035 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3039 if (eq->com.from_state != RES_EQ_HW) {
3044 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3047 put_res(dev, slave, res_id, RES_EQ);
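/*
 * The CQ wrappers mirror the EQ flow: SW2HW_CQ moves the CQ to
 * RES_CQ_HW and takes a reference on the MTT range named in the CQ
 * context before forwarding the command; HW2SW_CQ and QUERY_CQ below
 * undo or require that state, respectively.
 */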
3051 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3052 struct mlx4_vhcr *vhcr,
3053 struct mlx4_cmd_mailbox *inbox,
3054 struct mlx4_cmd_mailbox *outbox,
3055 struct mlx4_cmd_info *cmd)
3058 int cqn = vhcr->in_modifier;
3059 struct mlx4_cq_context *cqc = inbox->buf;
3060 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3062 struct res_mtt *mtt;
3064 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3067 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3070 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3073 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3076 atomic_inc(&mtt->ref_count);
3078 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3079 res_end_move(dev, slave, RES_CQ, cqn);
3083 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3085 res_abort_move(dev, slave, RES_CQ, cqn);
3089 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3090 struct mlx4_vhcr *vhcr,
3091 struct mlx4_cmd_mailbox *inbox,
3092 struct mlx4_cmd_mailbox *outbox,
3093 struct mlx4_cmd_info *cmd)
3096 int cqn = vhcr->in_modifier;
3099 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3102 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3105 atomic_dec(&cq->mtt->ref_count);
3106 res_end_move(dev, slave, RES_CQ, cqn);
3110 res_abort_move(dev, slave, RES_CQ, cqn);
3114 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3115 struct mlx4_vhcr *vhcr,
3116 struct mlx4_cmd_mailbox *inbox,
3117 struct mlx4_cmd_mailbox *outbox,
3118 struct mlx4_cmd_info *cmd)
3120 int cqn = vhcr->in_modifier;
3124 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3128 if (cq->com.from_state != RES_CQ_HW)
3131 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3133 put_res(dev, slave, cqn, RES_CQ);
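/*
 * MODIFY_CQ with op_modifier 0 is a resize: the CQ context in the inbox
 * names a new MTT range. Both the original and the new range are
 * claimed up front; only after firmware accepts the command is the
 * reference moved from the old range to the new one, so a failure
 * leaves the tracker state untouched.
 */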
3138 static int handle_resize(struct mlx4_dev *dev, int slave,
3139 struct mlx4_vhcr *vhcr,
3140 struct mlx4_cmd_mailbox *inbox,
3141 struct mlx4_cmd_mailbox *outbox,
3142 struct mlx4_cmd_info *cmd,
3146 struct res_mtt *orig_mtt;
3147 struct res_mtt *mtt;
3148 struct mlx4_cq_context *cqc = inbox->buf;
3149 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3151 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3155 if (orig_mtt != cq->mtt) {
3160 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3164 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3167 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3170 atomic_dec(&orig_mtt->ref_count);
3171 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3172 atomic_inc(&mtt->ref_count);
3174 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3178 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3180 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3186 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3187 struct mlx4_vhcr *vhcr,
3188 struct mlx4_cmd_mailbox *inbox,
3189 struct mlx4_cmd_mailbox *outbox,
3190 struct mlx4_cmd_info *cmd)
3192 int cqn = vhcr->in_modifier;
3196 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3200 if (cq->com.from_state != RES_CQ_HW)
3203 if (vhcr->op_modifier == 0) {
3204 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3208 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3210 put_res(dev, slave, cqn, RES_CQ);
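/*
 * The SRQ buffer occupies (1 << log_srq_size) WQEs of
 * (1 << (log_rq_stride + 4)) bytes each, hence the "+ 4" below. As an
 * illustrative example: log_srq_size = 10, log_rq_stride = 2 and a 4KB
 * page (page_shift = 12) give 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
 */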
3215 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3217 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3218 int log_rq_stride = srqc->logstride & 7;
3219 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3221 if (log_srq_size + log_rq_stride + 4 < page_shift)
3224 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3227 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3228 struct mlx4_vhcr *vhcr,
3229 struct mlx4_cmd_mailbox *inbox,
3230 struct mlx4_cmd_mailbox *outbox,
3231 struct mlx4_cmd_info *cmd)
3234 int srqn = vhcr->in_modifier;
3235 struct res_mtt *mtt;
3236 struct res_srq *srq;
3237 struct mlx4_srq_context *srqc = inbox->buf;
3238 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3240 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3243 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3246 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3249 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3254 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3258 atomic_inc(&mtt->ref_count);
3260 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3261 res_end_move(dev, slave, RES_SRQ, srqn);
3265 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3267 res_abort_move(dev, slave, RES_SRQ, srqn);
3272 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3273 struct mlx4_vhcr *vhcr,
3274 struct mlx4_cmd_mailbox *inbox,
3275 struct mlx4_cmd_mailbox *outbox,
3276 struct mlx4_cmd_info *cmd)
3279 int srqn = vhcr->in_modifier;
3280 struct res_srq *srq;
3282 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3285 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3288 atomic_dec(&srq->mtt->ref_count);
3290 atomic_dec(&srq->cq->ref_count);
3291 res_end_move(dev, slave, RES_SRQ, srqn);
3296 res_abort_move(dev, slave, RES_SRQ, srqn);
3301 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3302 struct mlx4_vhcr *vhcr,
3303 struct mlx4_cmd_mailbox *inbox,
3304 struct mlx4_cmd_mailbox *outbox,
3305 struct mlx4_cmd_info *cmd)
3308 int srqn = vhcr->in_modifier;
3309 struct res_srq *srq;
3311 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3314 if (srq->com.from_state != RES_SRQ_HW) {
3318 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3320 put_res(dev, slave, srqn, RES_SRQ);
3324 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3325 struct mlx4_vhcr *vhcr,
3326 struct mlx4_cmd_mailbox *inbox,
3327 struct mlx4_cmd_mailbox *outbox,
3328 struct mlx4_cmd_info *cmd)
3331 int srqn = vhcr->in_modifier;
3332 struct res_srq *srq;
3334 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3338 if (srq->com.from_state != RES_SRQ_HW) {
3343 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3345 put_res(dev, slave, srqn, RES_SRQ);
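/*
 * Generic QP command wrapper: commands that need no parameter fix-ups
 * funnel through here. It only verifies that the QP is in RES_QP_HW
 * state (busying it for the duration) before forwarding the command.
 */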
3349 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3350 struct mlx4_vhcr *vhcr,
3351 struct mlx4_cmd_mailbox *inbox,
3352 struct mlx4_cmd_mailbox *outbox,
3353 struct mlx4_cmd_info *cmd)
3356 int qpn = vhcr->in_modifier & 0x7fffff;
3359 err = get_res(dev, slave, qpn, RES_QP, &qp);
3362 if (qp->com.from_state != RES_QP_HW) {
3367 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3369 put_res(dev, slave, qpn, RES_QP);
3373 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3374 struct mlx4_vhcr *vhcr,
3375 struct mlx4_cmd_mailbox *inbox,
3376 struct mlx4_cmd_mailbox *outbox,
3377 struct mlx4_cmd_info *cmd)
3379 struct mlx4_qp_context *context = inbox->buf + 8;
3380 adjust_proxy_tun_qkey(dev, vhcr, context);
3381 update_pkey_index(dev, slave, inbox);
3382 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
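/*
 * Bit 6 of sched_queue selects the physical port (0 -> port 1,
 * 1 -> port 2). When a VF's virtual port maps to a different real
 * port, rewrite that bit via mlx4_slave_convert_port(): the primary
 * path is fixed up when it is being modified or the port is Ethernet,
 * and the alternate path when MLX4_QP_OPTPAR_ALT_ADDR_PATH is set.
 */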
3385 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3386 struct mlx4_qp_context *qpc,
3387 struct mlx4_cmd_mailbox *inbox)
3389 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3391 int port = mlx4_slave_convert_port(
3392 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3397 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3400 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3401 mlx4_is_eth(dev, port + 1)) {
3402 qpc->pri_path.sched_queue = pri_sched_queue;
3405 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3406 port = mlx4_slave_convert_port(
3407 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3411 qpc->alt_path.sched_queue =
3412 (qpc->alt_path.sched_queue & ~(1 << 6)) |
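/*
 * For an Ethernet (RoCE) QP, the source-MAC index carried in the low
 * 7 bits of pri_path.grh_mylmc must resolve to a MAC address that is
 * actually assigned to this slave on that port; otherwise the
 * transition is refused.
 */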
3418 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3419 struct mlx4_qp_context *qpc,
3420 struct mlx4_cmd_mailbox *inbox)
3424 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3425 u8 sched = *(u8 *)(inbox->buf + 64);
3428 port = (sched >> 6 & 1) + 1;
3429 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3430 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3431 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
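/*
 * INIT2RTR is where VST (port VLAN) enforcement is applied via
 * update_vport_qp_param(), so the VF-supplied scheduling and VLAN
 * fields are snapshotted first and, on success, stored in the tracked
 * QP. Those saved values are what the VLAN work handler at the end of
 * this file restores when the VF is switched back to VGT.
 */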
3437 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3438 struct mlx4_vhcr *vhcr,
3439 struct mlx4_cmd_mailbox *inbox,
3440 struct mlx4_cmd_mailbox *outbox,
3441 struct mlx4_cmd_info *cmd)
3444 struct mlx4_qp_context *qpc = inbox->buf + 8;
3445 int qpn = vhcr->in_modifier & 0x7fffff;
3447 u8 orig_sched_queue;
3448 __be32 orig_param3 = qpc->param3;
3449 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3450 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3451 u8 orig_pri_path_fl = qpc->pri_path.fl;
3452 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3453 u8 orig_feup = qpc->pri_path.feup;
3455 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3458 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3462 if (roce_verify_mac(dev, slave, qpc, inbox))
3465 update_pkey_index(dev, slave, inbox);
3466 update_gid(dev, inbox, (u8)slave);
3467 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3468 orig_sched_queue = qpc->pri_path.sched_queue;
3469 err = update_vport_qp_param(dev, inbox, slave, qpn);
3473 err = get_res(dev, slave, qpn, RES_QP, &qp);
3476 if (qp->com.from_state != RES_QP_HW) {
3481 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3483 /* if no error, save the sched_queue value passed in by the VF. This is
3484 * essentially the QoS value provided by the VF, and will be useful
3485 * if we allow dynamic changes from VST back to VGT
3488 qp->sched_queue = orig_sched_queue;
3489 qp->param3 = orig_param3;
3490 qp->vlan_control = orig_vlan_control;
3491 qp->fvl_rx = orig_fvl_rx;
3492 qp->pri_path_fl = orig_pri_path_fl;
3493 qp->vlan_index = orig_vlan_index;
3494 qp->feup = orig_feup;
3496 put_res(dev, slave, qpn, RES_QP);
3500 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3501 struct mlx4_vhcr *vhcr,
3502 struct mlx4_cmd_mailbox *inbox,
3503 struct mlx4_cmd_mailbox *outbox,
3504 struct mlx4_cmd_info *cmd)
3507 struct mlx4_qp_context *context = inbox->buf + 8;
3509 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3512 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3516 update_pkey_index(dev, slave, inbox);
3517 update_gid(dev, inbox, (u8)slave);
3518 adjust_proxy_tun_qkey(dev, vhcr, context);
3519 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3522 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3523 struct mlx4_vhcr *vhcr,
3524 struct mlx4_cmd_mailbox *inbox,
3525 struct mlx4_cmd_mailbox *outbox,
3526 struct mlx4_cmd_info *cmd)
3529 struct mlx4_qp_context *context = inbox->buf + 8;
3531 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3534 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3538 update_pkey_index(dev, slave, inbox);
3539 update_gid(dev, inbox, (u8)slave);
3540 adjust_proxy_tun_qkey(dev, vhcr, context);
3541 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3545 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3546 struct mlx4_vhcr *vhcr,
3547 struct mlx4_cmd_mailbox *inbox,
3548 struct mlx4_cmd_mailbox *outbox,
3549 struct mlx4_cmd_info *cmd)
3551 struct mlx4_qp_context *context = inbox->buf + 8;
3552 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3555 adjust_proxy_tun_qkey(dev, vhcr, context);
3556 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3559 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3560 struct mlx4_vhcr *vhcr,
3561 struct mlx4_cmd_mailbox *inbox,
3562 struct mlx4_cmd_mailbox *outbox,
3563 struct mlx4_cmd_info *cmd)
3566 struct mlx4_qp_context *context = inbox->buf + 8;
3568 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3571 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3575 adjust_proxy_tun_qkey(dev, vhcr, context);
3576 update_gid(dev, inbox, (u8)slave);
3577 update_pkey_index(dev, slave, inbox);
3578 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3581 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3582 struct mlx4_vhcr *vhcr,
3583 struct mlx4_cmd_mailbox *inbox,
3584 struct mlx4_cmd_mailbox *outbox,
3585 struct mlx4_cmd_info *cmd)
3588 struct mlx4_qp_context *context = inbox->buf + 8;
3590 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3593 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3597 adjust_proxy_tun_qkey(dev, vhcr, context);
3598 update_gid(dev, inbox, (u8)slave);
3599 update_pkey_index(dev, slave, inbox);
3600 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3603 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3604 struct mlx4_vhcr *vhcr,
3605 struct mlx4_cmd_mailbox *inbox,
3606 struct mlx4_cmd_mailbox *outbox,
3607 struct mlx4_cmd_info *cmd)
3610 int qpn = vhcr->in_modifier & 0x7fffff;
3613 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3616 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3620 atomic_dec(&qp->mtt->ref_count);
3621 atomic_dec(&qp->rcq->ref_count);
3622 atomic_dec(&qp->scq->ref_count);
3624 atomic_dec(&qp->srq->ref_count);
3625 res_end_move(dev, slave, RES_QP, qpn);
3629 res_abort_move(dev, slave, RES_QP, qpn);
3634 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3635 struct res_qp *rqp, u8 *gid)
3637 struct res_gid *res;
3639 list_for_each_entry(res, &rqp->mcg_list, list) {
3640 if (!memcmp(res->gid, gid, 16))
3646 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3647 u8 *gid, enum mlx4_protocol prot,
3648 enum mlx4_steer_type steer, u64 reg_id)
3650 struct res_gid *res;
3653 res = kzalloc(sizeof *res, GFP_KERNEL);
3657 spin_lock_irq(&rqp->mcg_spl);
3658 if (find_gid(dev, slave, rqp, gid)) {
3662 memcpy(res->gid, gid, 16);
3665 res->reg_id = reg_id;
3666 list_add_tail(&res->list, &rqp->mcg_list);
3669 spin_unlock_irq(&rqp->mcg_spl);
3674 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3675 u8 *gid, enum mlx4_protocol prot,
3676 enum mlx4_steer_type steer, u64 *reg_id)
3678 struct res_gid *res;
3681 spin_lock_irq(&rqp->mcg_spl);
3682 res = find_gid(dev, slave, rqp, gid);
3683 if (!res || res->prot != prot || res->steer != steer)
3686 *reg_id = res->reg_id;
3687 list_del(&res->list);
3691 spin_unlock_irq(&rqp->mcg_spl);
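/*
 * Multicast attach dispatches on the steering mode: device-managed
 * flow steering translates the attach into a flow rule (returning a
 * reg_id for later detach), while B0 steering uses the common attach
 * path. In both cases the port, where relevant, is taken from byte 5
 * of the GID and converted to the slave's real port.
 */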
3696 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3697 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3698 enum mlx4_steer_type type, u64 *reg_id)
3700 switch (dev->caps.steering_mode) {
3701 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3702 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3705 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3706 block_loopback, prot,
3709 case MLX4_STEERING_MODE_B0:
3710 if (prot == MLX4_PROT_ETH) {
3711 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3716 return mlx4_qp_attach_common(dev, qp, gid,
3717 block_loopback, prot, type);
3723 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3724 u8 gid[16], enum mlx4_protocol prot,
3725 enum mlx4_steer_type type, u64 reg_id)
3727 switch (dev->caps.steering_mode) {
3728 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3729 return mlx4_flow_detach(dev, reg_id);
3730 case MLX4_STEERING_MODE_B0:
3731 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3737 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3738 u8 *gid, enum mlx4_protocol prot)
3742 if (prot != MLX4_PROT_ETH)
3745 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3746 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3747 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3756 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3757 struct mlx4_vhcr *vhcr,
3758 struct mlx4_cmd_mailbox *inbox,
3759 struct mlx4_cmd_mailbox *outbox,
3760 struct mlx4_cmd_info *cmd)
3762 struct mlx4_qp qp; /* dummy for calling attach/detach */
3763 u8 *gid = inbox->buf;
3764 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3769 int attach = vhcr->op_modifier;
3770 int block_loopback = vhcr->in_modifier >> 31;
3771 u8 steer_type_mask = 2;
3772 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3774 qpn = vhcr->in_modifier & 0xffffff;
3775 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3781 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3784 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3787 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3791 err = mlx4_adjust_port(dev, slave, gid, prot);
3795 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3799 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3801 pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3804 put_res(dev, slave, qpn, RES_QP);
3808 qp_detach(dev, &qp, gid, prot, type, reg_id);
3810 put_res(dev, slave, qpn, RES_QP);
3815 * MAC validation for Flow Steering rules.
3816 * A VF can attach rules only with a MAC address that is assigned to it.
3818 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3819 struct list_head *rlist)
3821 struct mac_res *res, *tmp;
3824 /* make sure it isn't a multicast or broadcast MAC */
3825 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3826 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3827 list_for_each_entry_safe(res, tmp, rlist, list) {
3828 be_mac = cpu_to_be64(res->mac << 16);
3829 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3832 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3833 eth_header->eth.dst_mac, slave);
3840 * If the eth header is missing, prepend an eth header with a MAC address
3841 * assigned to the VF.
3843 static int add_eth_header(struct mlx4_dev *dev, int slave,
3844 struct mlx4_cmd_mailbox *inbox,
3845 struct list_head *rlist, int header_id)
3847 struct mac_res *res, *tmp;
3849 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3850 struct mlx4_net_trans_rule_hw_eth *eth_header;
3851 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3852 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3854 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3856 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3858 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3860 /* Make room in the inbox for the eth header */
3861 switch (header_id) {
3862 case MLX4_NET_TRANS_RULE_ID_IPV4:
3864 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3865 memmove(ip_header, eth_header,
3866 sizeof(*ip_header) + sizeof(*l4_header));
3868 case MLX4_NET_TRANS_RULE_ID_TCP:
3869 case MLX4_NET_TRANS_RULE_ID_UDP:
3870 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3872 memmove(l4_header, eth_header, sizeof(*l4_header));
3877 list_for_each_entry_safe(res, tmp, rlist, list) {
3878 if (port == res->port) {
3879 be_mac = cpu_to_be64(res->mac << 16);
3884 pr_err("Failed adding eth header to FS rule, can't find a matching MAC for port %d\n",
3889 memset(eth_header, 0, sizeof(*eth_header));
3890 eth_header->size = sizeof(*eth_header) >> 2;
3891 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3892 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3893 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
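/*
 * UPDATE_QP from a slave is restricted: only the primary-path MAC
 * index may change (MLX4_UPD_QP_PATH_MASK_SUPPORTED); any other
 * qp_mask or address-path bits cause the request to be rejected. The
 * new smac index is validated against the slave's own MACs before the
 * command is forwarded as a native UPDATE_QP.
 */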
3899 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3900 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3901 struct mlx4_vhcr *vhcr,
3902 struct mlx4_cmd_mailbox *inbox,
3903 struct mlx4_cmd_mailbox *outbox,
3904 struct mlx4_cmd_info *cmd_info)
3907 u32 qpn = vhcr->in_modifier & 0xffffff;
3911 u64 pri_addr_path_mask;
3912 struct mlx4_update_qp_context *cmd;
3915 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3917 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3918 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3919 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3922 /* Just change the smac for the QP */
3923 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3925 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3929 port = (rqp->sched_queue >> 6 & 1) + 1;
3930 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3931 err = mac_find_smac_ix_in_slave(dev, slave, port,
3934 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3939 err = mlx4_cmd(dev, inbox->dma,
3940 vhcr->in_modifier, 0,
3941 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3944 mlx4_err(dev, "Failed to update smac for qpn 0x%x, command failed\n", qpn);
3949 put_res(dev, slave, qpn, RES_QP);
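/*
 * Attach a flow-steering rule on behalf of a slave: the rule's port is
 * converted to the real port, the target QPN is resolved and busied,
 * and the rule header is validated - Ethernet headers must carry a MAC
 * owned by the VF, and L3/L4-only rules get an Ethernet header
 * prepended (growing in_modifier accordingly). On success the rule is
 * registered in the tracker against the QPN so a later detach can drop
 * the QP reference.
 */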
3953 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3954 struct mlx4_vhcr *vhcr,
3955 struct mlx4_cmd_mailbox *inbox,
3956 struct mlx4_cmd_mailbox *outbox,
3957 struct mlx4_cmd_info *cmd)
3960 struct mlx4_priv *priv = mlx4_priv(dev);
3961 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3962 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3966 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3967 struct _rule_hw *rule_header;
3970 if (dev->caps.steering_mode !=
3971 MLX4_STEERING_MODE_DEVICE_MANAGED)
3974 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3975 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
3976 if (ctrl->port <= 0)
3978 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3979 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3981 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
3984 rule_header = (struct _rule_hw *)(ctrl + 1);
3985 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3987 switch (header_id) {
3988 case MLX4_NET_TRANS_RULE_ID_ETH:
3989 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3994 case MLX4_NET_TRANS_RULE_ID_IB:
3996 case MLX4_NET_TRANS_RULE_ID_IPV4:
3997 case MLX4_NET_TRANS_RULE_ID_TCP:
3998 case MLX4_NET_TRANS_RULE_ID_UDP:
3999 pr_warn("Can't attach FS rule without L2 headers; adding L2 header\n");
4000 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4004 vhcr->in_modifier +=
4005 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4008 pr_err("Corrupted mailbox\n");
4013 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4014 vhcr->in_modifier, 0,
4015 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4020 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4022 mlx4_err(dev, "Failed to add flow steering resources\n");
4024 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4025 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4029 atomic_inc(&rqp->ref_count);
4031 put_res(dev, slave, qpn, RES_QP);
4035 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4036 struct mlx4_vhcr *vhcr,
4037 struct mlx4_cmd_mailbox *inbox,
4038 struct mlx4_cmd_mailbox *outbox,
4039 struct mlx4_cmd_info *cmd)
4043 struct res_fs_rule *rrule;
4045 if (dev->caps.steering_mode !=
4046 MLX4_STEERING_MODE_DEVICE_MANAGED)
4049 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4052 /* Release the rule from busy state before removal */
4053 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4054 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4058 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4060 mlx4_err(dev, "Failed to remove flow steering resources\n");
4064 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4065 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4068 atomic_dec(&rqp->ref_count);
4070 put_res(dev, slave, rrule->qpn, RES_QP);
4075 BUSY_MAX_RETRIES = 10
4078 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4079 struct mlx4_vhcr *vhcr,
4080 struct mlx4_cmd_mailbox *inbox,
4081 struct mlx4_cmd_mailbox *outbox,
4082 struct mlx4_cmd_info *cmd)
4085 int index = vhcr->in_modifier & 0xffff;
4087 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4091 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4092 put_res(dev, slave, index, RES_COUNTER);
4096 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4098 struct res_gid *rgid;
4099 struct res_gid *tmp;
4100 struct mlx4_qp qp; /* dummy for calling attach/detach */
4102 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4103 switch (dev->caps.steering_mode) {
4104 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4105 mlx4_flow_detach(dev, rgid->reg_id);
4107 case MLX4_STEERING_MODE_B0:
4108 qp.qpn = rqp->local_qpn;
4109 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4110 rgid->prot, rgid->steer);
4113 list_del(&rgid->list);
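/*
 * Teardown fencing: mark every resource of @type owned by @slave as
 * RES_ANY_BUSY so no command wrapper can grab it mid-cleanup. Entries
 * that are already busy are counted (and optionally reported);
 * move_all_busy() below retries for up to 5 seconds before giving up
 * and printing the stragglers.
 */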
4118 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4119 enum mlx4_resource type, int print)
4121 struct mlx4_priv *priv = mlx4_priv(dev);
4122 struct mlx4_resource_tracker *tracker =
4123 &priv->mfunc.master.res_tracker;
4124 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4125 struct res_common *r;
4126 struct res_common *tmp;
4130 spin_lock_irq(mlx4_tlock(dev));
4131 list_for_each_entry_safe(r, tmp, rlist, list) {
4132 if (r->owner == slave) {
4134 if (r->state == RES_ANY_BUSY) {
4137 "%s id 0x%llx is busy\n",
4142 r->from_state = r->state;
4143 r->state = RES_ANY_BUSY;
4149 spin_unlock_irq(mlx4_tlock(dev));
4154 static int move_all_busy(struct mlx4_dev *dev, int slave,
4155 enum mlx4_resource type)
4157 unsigned long begin;
4162 busy = _move_all_busy(dev, slave, type, 0);
4163 if (time_after(jiffies, begin + 5 * HZ))
4170 busy = _move_all_busy(dev, slave, type, 1);
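/*
 * The rem_slave_* helpers below share one pattern: fence the resource
 * type with move_all_busy(), then walk the slave's list and drive each
 * entry down its state machine (e.g. RES_QP_HW -> MAPPED -> RESERVED ->
 * freed), issuing the firmware command needed to leave each state and
 * releasing ICM and tracker bookkeeping along the way.
 */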
4174 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4176 struct mlx4_priv *priv = mlx4_priv(dev);
4177 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4178 struct list_head *qp_list =
4179 &tracker->slave_list[slave].res_list[RES_QP];
4187 err = move_all_busy(dev, slave, RES_QP);
4189 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4192 spin_lock_irq(mlx4_tlock(dev));
4193 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4194 spin_unlock_irq(mlx4_tlock(dev));
4195 if (qp->com.owner == slave) {
4196 qpn = qp->com.res_id;
4197 detach_qp(dev, slave, qp);
4198 state = qp->com.from_state;
4199 while (state != 0) {
4201 case RES_QP_RESERVED:
4202 spin_lock_irq(mlx4_tlock(dev));
4203 rb_erase(&qp->com.node,
4204 &tracker->res_tree[RES_QP]);
4205 list_del(&qp->com.list);
4206 spin_unlock_irq(mlx4_tlock(dev));
4207 if (!valid_reserved(dev, slave, qpn)) {
4208 __mlx4_qp_release_range(dev, qpn, 1);
4209 mlx4_release_resource(dev, slave,
4216 if (!valid_reserved(dev, slave, qpn))
4217 __mlx4_qp_free_icm(dev, qpn);
4218 state = RES_QP_RESERVED;
4222 err = mlx4_cmd(dev, in_param,
4225 MLX4_CMD_TIME_CLASS_A,
4228 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4229 slave, qp->local_qpn);
4230 atomic_dec(&qp->rcq->ref_count);
4231 atomic_dec(&qp->scq->ref_count);
4232 atomic_dec(&qp->mtt->ref_count);
4234 atomic_dec(&qp->srq->ref_count);
4235 state = RES_QP_MAPPED;
4242 spin_lock_irq(mlx4_tlock(dev));
4244 spin_unlock_irq(mlx4_tlock(dev));
4247 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4249 struct mlx4_priv *priv = mlx4_priv(dev);
4250 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4251 struct list_head *srq_list =
4252 &tracker->slave_list[slave].res_list[RES_SRQ];
4253 struct res_srq *srq;
4254 struct res_srq *tmp;
4261 err = move_all_busy(dev, slave, RES_SRQ);
4263 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4266 spin_lock_irq(mlx4_tlock(dev));
4267 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4268 spin_unlock_irq(mlx4_tlock(dev));
4269 if (srq->com.owner == slave) {
4270 srqn = srq->com.res_id;
4271 state = srq->com.from_state;
4272 while (state != 0) {
4274 case RES_SRQ_ALLOCATED:
4275 __mlx4_srq_free_icm(dev, srqn);
4276 spin_lock_irq(mlx4_tlock(dev));
4277 rb_erase(&srq->com.node,
4278 &tracker->res_tree[RES_SRQ]);
4279 list_del(&srq->com.list);
4280 spin_unlock_irq(mlx4_tlock(dev));
4281 mlx4_release_resource(dev, slave,
4289 err = mlx4_cmd(dev, in_param, srqn, 1,
4291 MLX4_CMD_TIME_CLASS_A,
4294 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4297 atomic_dec(&srq->mtt->ref_count);
4299 atomic_dec(&srq->cq->ref_count);
4300 state = RES_SRQ_ALLOCATED;
4308 spin_lock_irq(mlx4_tlock(dev));
4310 spin_unlock_irq(mlx4_tlock(dev));
4313 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4315 struct mlx4_priv *priv = mlx4_priv(dev);
4316 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4317 struct list_head *cq_list =
4318 &tracker->slave_list[slave].res_list[RES_CQ];
4327 err = move_all_busy(dev, slave, RES_CQ);
4329 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4332 spin_lock_irq(mlx4_tlock(dev));
4333 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4334 spin_unlock_irq(mlx4_tlock(dev));
4335 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4336 cqn = cq->com.res_id;
4337 state = cq->com.from_state;
4338 while (state != 0) {
4340 case RES_CQ_ALLOCATED:
4341 __mlx4_cq_free_icm(dev, cqn);
4342 spin_lock_irq(mlx4_tlock(dev));
4343 rb_erase(&cq->com.node,
4344 &tracker->res_tree[RES_CQ]);
4345 list_del(&cq->com.list);
4346 spin_unlock_irq(mlx4_tlock(dev));
4347 mlx4_release_resource(dev, slave,
4355 err = mlx4_cmd(dev, in_param, cqn, 1,
4357 MLX4_CMD_TIME_CLASS_A,
4360 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4362 atomic_dec(&cq->mtt->ref_count);
4363 state = RES_CQ_ALLOCATED;
4371 spin_lock_irq(mlx4_tlock(dev));
4373 spin_unlock_irq(mlx4_tlock(dev));
4376 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4378 struct mlx4_priv *priv = mlx4_priv(dev);
4379 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4380 struct list_head *mpt_list =
4381 &tracker->slave_list[slave].res_list[RES_MPT];
4382 struct res_mpt *mpt;
4383 struct res_mpt *tmp;
4390 err = move_all_busy(dev, slave, RES_MPT);
4392 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4395 spin_lock_irq(mlx4_tlock(dev));
4396 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4397 spin_unlock_irq(mlx4_tlock(dev));
4398 if (mpt->com.owner == slave) {
4399 mptn = mpt->com.res_id;
4400 state = mpt->com.from_state;
4401 while (state != 0) {
4403 case RES_MPT_RESERVED:
4404 __mlx4_mpt_release(dev, mpt->key);
4405 spin_lock_irq(mlx4_tlock(dev));
4406 rb_erase(&mpt->com.node,
4407 &tracker->res_tree[RES_MPT]);
4408 list_del(&mpt->com.list);
4409 spin_unlock_irq(mlx4_tlock(dev));
4410 mlx4_release_resource(dev, slave,
4416 case RES_MPT_MAPPED:
4417 __mlx4_mpt_free_icm(dev, mpt->key);
4418 state = RES_MPT_RESERVED;
4423 err = mlx4_cmd(dev, in_param, mptn, 0,
4425 MLX4_CMD_TIME_CLASS_A,
4428 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4431 atomic_dec(&mpt->mtt->ref_count);
4432 state = RES_MPT_MAPPED;
4439 spin_lock_irq(mlx4_tlock(dev));
4441 spin_unlock_irq(mlx4_tlock(dev));
4444 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4446 struct mlx4_priv *priv = mlx4_priv(dev);
4447 struct mlx4_resource_tracker *tracker =
4448 &priv->mfunc.master.res_tracker;
4449 struct list_head *mtt_list =
4450 &tracker->slave_list[slave].res_list[RES_MTT];
4451 struct res_mtt *mtt;
4452 struct res_mtt *tmp;
4458 err = move_all_busy(dev, slave, RES_MTT);
4460 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4463 spin_lock_irq(mlx4_tlock(dev));
4464 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4465 spin_unlock_irq(mlx4_tlock(dev));
4466 if (mtt->com.owner == slave) {
4467 base = mtt->com.res_id;
4468 state = mtt->com.from_state;
4469 while (state != 0) {
4471 case RES_MTT_ALLOCATED:
4472 __mlx4_free_mtt_range(dev, base,
4474 spin_lock_irq(mlx4_tlock(dev));
4475 rb_erase(&mtt->com.node,
4476 &tracker->res_tree[RES_MTT]);
4477 list_del(&mtt->com.list);
4478 spin_unlock_irq(mlx4_tlock(dev));
4479 mlx4_release_resource(dev, slave, RES_MTT,
4480 1 << mtt->order, 0);
4490 spin_lock_irq(mlx4_tlock(dev));
4492 spin_unlock_irq(mlx4_tlock(dev));
4495 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4497 struct mlx4_priv *priv = mlx4_priv(dev);
4498 struct mlx4_resource_tracker *tracker =
4499 &priv->mfunc.master.res_tracker;
4500 struct list_head *fs_rule_list =
4501 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4502 struct res_fs_rule *fs_rule;
4503 struct res_fs_rule *tmp;
4508 err = move_all_busy(dev, slave, RES_FS_RULE);
4510 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4513 spin_lock_irq(mlx4_tlock(dev));
4514 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4515 spin_unlock_irq(mlx4_tlock(dev));
4516 if (fs_rule->com.owner == slave) {
4517 base = fs_rule->com.res_id;
4518 state = fs_rule->com.from_state;
4519 while (state != 0) {
4521 case RES_FS_RULE_ALLOCATED:
4523 err = mlx4_cmd(dev, base, 0, 0,
4524 MLX4_QP_FLOW_STEERING_DETACH,
4525 MLX4_CMD_TIME_CLASS_A,
4528 spin_lock_irq(mlx4_tlock(dev));
4529 rb_erase(&fs_rule->com.node,
4530 &tracker->res_tree[RES_FS_RULE]);
4531 list_del(&fs_rule->com.list);
4532 spin_unlock_irq(mlx4_tlock(dev));
4542 spin_lock_irq(mlx4_tlock(dev));
4544 spin_unlock_irq(mlx4_tlock(dev));
4547 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4549 struct mlx4_priv *priv = mlx4_priv(dev);
4550 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4551 struct list_head *eq_list =
4552 &tracker->slave_list[slave].res_list[RES_EQ];
4559 struct mlx4_cmd_mailbox *mailbox;
4561 err = move_all_busy(dev, slave, RES_EQ);
4563 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4566 spin_lock_irq(mlx4_tlock(dev));
4567 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4568 spin_unlock_irq(mlx4_tlock(dev));
4569 if (eq->com.owner == slave) {
4570 eqn = eq->com.res_id;
4571 state = eq->com.from_state;
4572 while (state != 0) {
4574 case RES_EQ_RESERVED:
4575 spin_lock_irq(mlx4_tlock(dev));
4576 rb_erase(&eq->com.node,
4577 &tracker->res_tree[RES_EQ]);
4578 list_del(&eq->com.list);
4579 spin_unlock_irq(mlx4_tlock(dev));
4585 mailbox = mlx4_alloc_cmd_mailbox(dev);
4586 if (IS_ERR(mailbox)) {
4590 err = mlx4_cmd_box(dev, slave, 0,
4593 MLX4_CMD_TIME_CLASS_A,
4596 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4598 mlx4_free_cmd_mailbox(dev, mailbox);
4599 atomic_dec(&eq->mtt->ref_count);
4600 state = RES_EQ_RESERVED;
4608 spin_lock_irq(mlx4_tlock(dev));
4610 spin_unlock_irq(mlx4_tlock(dev));
4613 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4615 struct mlx4_priv *priv = mlx4_priv(dev);
4616 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4617 struct list_head *counter_list =
4618 &tracker->slave_list[slave].res_list[RES_COUNTER];
4619 struct res_counter *counter;
4620 struct res_counter *tmp;
4624 err = move_all_busy(dev, slave, RES_COUNTER);
4626 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4629 spin_lock_irq(mlx4_tlock(dev));
4630 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4631 if (counter->com.owner == slave) {
4632 index = counter->com.res_id;
4633 rb_erase(&counter->com.node,
4634 &tracker->res_tree[RES_COUNTER]);
4635 list_del(&counter->com.list);
4637 __mlx4_counter_free(dev, index);
4638 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4641 spin_unlock_irq(mlx4_tlock(dev));
4644 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4646 struct mlx4_priv *priv = mlx4_priv(dev);
4647 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4648 struct list_head *xrcdn_list =
4649 &tracker->slave_list[slave].res_list[RES_XRCD];
4650 struct res_xrcdn *xrcd;
4651 struct res_xrcdn *tmp;
4655 err = move_all_busy(dev, slave, RES_XRCD);
4657 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4660 spin_lock_irq(mlx4_tlock(dev));
4661 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4662 if (xrcd->com.owner == slave) {
4663 xrcdn = xrcd->com.res_id;
4664 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4665 list_del(&xrcd->com.list);
4667 __mlx4_xrcd_free(dev, xrcdn);
4670 spin_unlock_irq(mlx4_tlock(dev));
4673 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4675 struct mlx4_priv *priv = mlx4_priv(dev);
4676 mlx4_reset_roce_gids(dev, slave);
4677 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4678 rem_slave_vlans(dev, slave);
4679 rem_slave_macs(dev, slave);
4680 rem_slave_fs_rule(dev, slave);
4681 rem_slave_qps(dev, slave);
4682 rem_slave_srqs(dev, slave);
4683 rem_slave_cqs(dev, slave);
4684 rem_slave_mrs(dev, slave);
4685 rem_slave_eqs(dev, slave);
4686 rem_slave_mtts(dev, slave);
4687 rem_slave_counters(dev, slave);
4688 rem_slave_xrcdns(dev, slave);
4689 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
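/*
 * Deferred VST/VGT update: for every eligible QP of the VF (in HW
 * state, past INIT2RTR, not reserved and not RSS), issue UPDATE_QP on
 * the primary path. Switching to VST forces the new vlan_index,
 * vlan_control and QoS bits; switching back to MLX4_VGT restores the
 * values saved at INIT2RTR time. Errors are counted and reported, and
 * the old VLAN is unregistered only if all updates succeeded.
 */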
4692 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4694 struct mlx4_vf_immed_vlan_work *work =
4695 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4696 struct mlx4_cmd_mailbox *mailbox;
4697 struct mlx4_update_qp_context *upd_context;
4698 struct mlx4_dev *dev = &work->priv->dev;
4699 struct mlx4_resource_tracker *tracker =
4700 &work->priv->mfunc.master.res_tracker;
4701 struct list_head *qp_list =
4702 &tracker->slave_list[work->slave].res_list[RES_QP];
4705 u64 qp_path_mask_vlan_ctrl =
4706 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4707 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4708 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4709 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4710 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4711 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4713 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4714 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4715 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4716 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4717 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4718 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4719 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4722 int port, errors = 0;
4725 if (mlx4_is_slave(dev)) {
4726 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4731 mailbox = mlx4_alloc_cmd_mailbox(dev);
4732 if (IS_ERR(mailbox))
4734 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4735 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4736 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4737 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4738 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4739 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4740 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4741 else if (!work->vlan_id)
4742 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4743 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4745 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4746 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4747 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4749 upd_context = mailbox->buf;
4750 upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4752 spin_lock_irq(mlx4_tlock(dev));
4753 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4754 spin_unlock_irq(mlx4_tlock(dev));
4755 if (qp->com.owner == work->slave) {
4756 if (qp->com.from_state != RES_QP_HW ||
4757 !qp->sched_queue || /* no INIT2RTR transition yet */
4758 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4759 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4760 spin_lock_irq(mlx4_tlock(dev));
4763 port = (qp->sched_queue >> 6 & 1) + 1;
4764 if (port != work->port) {
4765 spin_lock_irq(mlx4_tlock(dev));
4768 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4769 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4771 upd_context->primary_addr_path_mask =
4772 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4773 if (work->vlan_id == MLX4_VGT) {
4774 upd_context->qp_context.param3 = qp->param3;
4775 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4776 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4777 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4778 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4779 upd_context->qp_context.pri_path.feup = qp->feup;
4780 upd_context->qp_context.pri_path.sched_queue =
4783 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4784 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4785 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4786 upd_context->qp_context.pri_path.fvl_rx =
4787 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4788 upd_context->qp_context.pri_path.fl =
4789 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4790 upd_context->qp_context.pri_path.feup =
4791 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4792 upd_context->qp_context.pri_path.sched_queue =
4793 qp->sched_queue & 0xC7;
4794 upd_context->qp_context.pri_path.sched_queue |=
4795 ((work->qos & 0x7) << 3);
4798 err = mlx4_cmd(dev, mailbox->dma,
4799 qp->local_qpn & 0xffffff,
4800 0, MLX4_CMD_UPDATE_QP,
4801 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4803 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4804 work->slave, port, qp->local_qpn, err);
4808 spin_lock_irq(mlx4_tlock(dev));
4810 spin_unlock_irq(mlx4_tlock(dev));
4811 mlx4_free_cmd_mailbox(dev, mailbox);
4814 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4815 errors, work->slave, work->port);
4817 /* unregister the previous vlan_id if needed, provided there were no
4818 * errors while updating the QPs
4820 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4821 NO_INDX != work->orig_vlan_ix)
4822 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4823 work->orig_vlan_id);