Merge branch 'odp_fixes' into rdma.git for-next
author Jason Gunthorpe <jgg@mellanox.com>
Wed, 21 Aug 2019 17:10:36 +0000 (14:10 -0300)
committer Jason Gunthorpe <jgg@mellanox.com>
Wed, 21 Aug 2019 17:10:36 +0000 (14:10 -0300)
Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
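
For readers following the umem_odp rework listed above, the sketch below illustrates the three creation paths and the matching teardown that the series separates out. It is an editor's illustration, not code from this merge: the function names follow the post-series include/rdma/ib_umem_odp.h interface, and the exact signatures shown are assumptions for this kernel revision, given only for orientation.

/*
 * Illustrative only -- the three ways a driver obtains a umem_odp after
 * this series, each undone with ib_umem_odp_release().  Signatures are
 * assumed from the post-series ib_umem_odp.h and may differ in detail.
 */
#include <rdma/ib_umem_odp.h>

static int odp_creation_sketch(struct ib_udata *udata, unsigned long addr,
			       size_t length, int access_flags)
{
	struct ib_umem_odp *odp, *implicit, *child;

	/* 1. Explicit ODP umem over a user VA range (a regular ODP MR). */
	odp = ib_umem_odp_get(udata, addr, length, access_flags);
	if (IS_ERR(odp))
		return PTR_ERR(odp);

	/* 2. Implicit ODP parent covering the whole address space. */
	implicit = ib_umem_odp_alloc_implicit(udata, access_flags);
	if (IS_ERR(implicit)) {
		ib_umem_odp_release(odp);
		return PTR_ERR(implicit);
	}

	/* 3. Child umem carved out of the implicit parent on demand. */
	child = ib_umem_odp_alloc_child(implicit, addr, length);
	if (IS_ERR(child)) {
		ib_umem_odp_release(implicit);
		ib_umem_odp_release(odp);
		return PTR_ERR(child);
	}

	/* ib_umem_odp_release() undoes whichever allocation created it. */
	ib_umem_odp_release(child);
	ib_umem_odp_release(implicit);
	ib_umem_odp_release(odp);
	return 0;
}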
91 files changed:
drivers/infiniband/core/addr.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma_configfs.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/counters.c
drivers/infiniband/core/cq.c
drivers/infiniband/core/device.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/efa/efa.h
drivers/infiniband/hw/efa/efa_com.c
drivers/infiniband/hw/efa/efa_com_cmd.c
drivers/infiniband/hw/efa/efa_com_cmd.h
drivers/infiniband/hw/efa/efa_main.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hfi1/user_sdma.h
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/Kconfig
drivers/infiniband/hw/hns/hns_roce_cmd.c
drivers/infiniband/hw/hns/hns_roce_cq.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_hem.h
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq_cmd.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/infiniband/sw/rxe/rxe.h
drivers/infiniband/sw/rxe/rxe_param.h
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
fs/cifs/smbdirect.c
include/Kbuild
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/qed/qed_rdma_if.h
include/rdma/ib.h
include/rdma/ib_verbs.h
include/rdma/iw_portmap.h
include/rdma/opa_port_info.h
include/rdma/rdma_netlink.h
include/rdma/rdmavt_cq.h
include/rdma/signature.h
net/9p/trans_rdma.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtrdma/verbs.c

diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 9b76a8fcdd2479bc696184eee6848fde6de8d994..1dd467bed8fc1255d823a9083a68af9724715d66 100644 (file)
@@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
 
        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
-       rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+       rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
 
        /* Make the request retry, so when we get the response from userspace
         * we will have something.
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 18e476b3ced0718c7efc3e9079dad47fa3637508..00fb3eacda1944d780cc5810624c6ae38a881601 100644 (file)
@@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device,
        if (leak)
                return;
 
+       mutex_destroy(&table->lock);
        kfree(table->data_vec);
        kfree(table);
 }
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 3ec2c415bb706f947a0a59ea1c05b8e2b6207384..8b0b5ae22e4c8935385a6c883ccf30eb704044a8 100644 (file)
@@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = {
 
 int __init cma_configfs_init(void)
 {
+       int ret;
+
        config_group_init(&cma_subsys.su_group);
        mutex_init(&cma_subsys.su_mutex);
-       return configfs_register_subsystem(&cma_subsys);
+       ret = configfs_register_subsystem(&cma_subsys);
+       if (ret)
+               mutex_destroy(&cma_subsys.su_mutex);
+       return ret;
 }
 
 void __exit cma_configfs_exit(void)
 {
        configfs_unregister_subsystem(&cma_subsys);
+       mutex_destroy(&cma_subsys.su_mutex);
 }
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index beee7b7e0d9acf6526a52f6205950d561f547067..3a8b0911c3bc16193032dcf1c678f18ded9d9f70 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/cgroup_rdma.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/opa_addr.h>
@@ -54,8 +56,26 @@ struct pkey_index_qp_list {
        struct list_head    qp_list;
 };
 
+/**
+ * struct rdma_dev_net - rdma net namespace metadata for a net
+ * @nl_sock:   Pointer to netlink socket
+ * @net:       Pointer to owner net namespace
+ * @id:                xarray id to identify the net namespace.
+ */
+struct rdma_dev_net {
+       struct sock *nl_sock;
+       possible_net_t net;
+       u32 id;
+};
+
 extern const struct attribute_group ib_dev_attr_group;
 extern bool ib_devices_shared_netns;
+extern unsigned int rdma_dev_net_id;
+
+static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
+{
+       return net_generic(net, rdma_dev_net_id);
+}
 
 int ib_device_register_sysfs(struct ib_device *device);
 void ib_device_unregister_sysfs(struct ib_device *device);
@@ -179,7 +199,6 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
-int rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
@@ -365,4 +384,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj);
 
 int ib_device_set_netns_put(struct sk_buff *skb,
                            struct ib_device *dev, u32 ns_fd);
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet);
+void rdma_nl_net_exit(struct rdma_dev_net *rnet);
 #endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index b79890739a2c6b14f0d653c1e6e5a61ec3747680..61fcb3a3134001aa3a36887038580e7bfff39e10 100644 (file)
@@ -601,7 +601,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port,
 void rdma_counter_init(struct ib_device *dev)
 {
        struct rdma_port_counter *port_counter;
-       u32 port;
+       u32 port, i;
 
        if (!dev->port_data)
                return;
@@ -622,13 +622,12 @@ void rdma_counter_init(struct ib_device *dev)
        return;
 
 fail:
-       rdma_for_each_port(dev, port) {
+       for (i = port; i >= rdma_start_port(dev); i--) {
                port_counter = &dev->port_data[port].port_counter;
                kfree(port_counter->hstats);
                port_counter->hstats = NULL;
+               mutex_destroy(&port_counter->lock);
        }
-
-       return;
 }
 
 void rdma_counter_release(struct ib_device *dev)
@@ -639,5 +638,6 @@ void rdma_counter_release(struct ib_device *dev)
        rdma_for_each_port(dev, port) {
                port_counter = &dev->port_data[port].port_counter;
                kfree(port_counter->hstats);
+               mutex_destroy(&port_counter->lock);
        }
 }
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 7c599878ccf711e22d70771c67a1b093f101670f..bbfded6d5d3d0d7314b770fdbd3e862b655dd663 100644 (file)
@@ -252,6 +252,34 @@ out_free_cq:
 }
 EXPORT_SYMBOL(__ib_alloc_cq_user);
 
+/**
+ * __ib_alloc_cq_any - allocate a completion queue
+ * @dev:               device to allocate the CQ for
+ * @private:           driver private data, accessible from cq->cq_context
+ * @nr_cqe:            number of CQEs to allocate
+ * @poll_ctx:          context to poll the CQ from
+ * @caller:            module owner name
+ *
+ * Attempt to spread ULP Completion Queues over each device's interrupt
+ * vectors. A simple best-effort mechanism is used.
+ */
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+                               int nr_cqe, enum ib_poll_context poll_ctx,
+                               const char *caller)
+{
+       static atomic_t counter;
+       int comp_vector = 0;
+
+       if (dev->num_comp_vectors > 1)
+               comp_vector =
+                       atomic_inc_return(&counter) %
+                       min_t(int, dev->num_comp_vectors, num_online_cpus());
+
+       return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+                                 caller, NULL);
+}
+EXPORT_SYMBOL(__ib_alloc_cq_any);
+
 /**
  * ib_free_cq_user - free a completion queue
  * @cq:                completion queue to free.
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b5631b8a03971da6ed8862d035168b87e3f350f3..99c4a55545cfba00812824c87ac56c4b0a2acd20 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/hashtable.h>
@@ -111,17 +110,7 @@ static void ib_client_put(struct ib_client *client)
  */
 #define CLIENT_DATA_REGISTERED XA_MARK_1
 
-/**
- * struct rdma_dev_net - rdma net namespace metadata for a net
- * @net:       Pointer to owner net namespace
- * @id:                xarray id to identify the net namespace.
- */
-struct rdma_dev_net {
-       possible_net_t net;
-       u32 id;
-};
-
-static unsigned int rdma_dev_net_id;
+unsigned int rdma_dev_net_id;
 
 /*
  * A list of net namespaces is maintained in an xarray. This is necessary
@@ -514,6 +503,9 @@ static void ib_device_release(struct device *device)
                          rcu_head);
        }
 
+       mutex_destroy(&dev->unregistration_lock);
+       mutex_destroy(&dev->compat_devs_mutex);
+
        xa_destroy(&dev->compat_devs);
        xa_destroy(&dev->client_data);
        kfree_rcu(dev, rcu_head);
@@ -1060,7 +1052,7 @@ int rdma_compatdev_set(u8 enable)
 
 static void rdma_dev_exit_net(struct net *net)
 {
-       struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
        struct ib_device *dev;
        unsigned long index;
        int ret;
@@ -1094,25 +1086,32 @@ static void rdma_dev_exit_net(struct net *net)
        }
        up_read(&devices_rwsem);
 
+       rdma_nl_net_exit(rnet);
        xa_erase(&rdma_nets, rnet->id);
 }
 
 static __net_init int rdma_dev_init_net(struct net *net)
 {
-       struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id);
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
        unsigned long index;
        struct ib_device *dev;
        int ret;
 
+       write_pnet(&rnet->net, net);
+
+       ret = rdma_nl_net_init(rnet);
+       if (ret)
+               return ret;
+
        /* No need to create any compat devices in default init_net. */
        if (net_eq(net, &init_net))
                return 0;
 
-       write_pnet(&rnet->net, net);
-
        ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
-       if (ret)
+       if (ret) {
+               rdma_nl_net_exit(rnet);
                return ret;
+       }
 
        down_read(&devices_rwsem);
        xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
@@ -1974,31 +1973,64 @@ void ib_dispatch_event(struct ib_event *event)
 }
 EXPORT_SYMBOL(ib_dispatch_event);
 
-/**
- * ib_query_port - Query IB port attributes
- * @device:Device to query
- * @port_num:Port number to query
- * @port_attr:Port attributes
- *
- * ib_query_port() returns the attributes of a port through the
- * @port_attr pointer.
- */
-int ib_query_port(struct ib_device *device,
-                 u8 port_num,
-                 struct ib_port_attr *port_attr)
+static int iw_query_port(struct ib_device *device,
+                          u8 port_num,
+                          struct ib_port_attr *port_attr)
 {
-       union ib_gid gid;
+       struct in_device *inetdev;
+       struct net_device *netdev;
        int err;
 
-       if (!rdma_is_port_valid(device, port_num))
-               return -EINVAL;
+       memset(port_attr, 0, sizeof(*port_attr));
+
+       netdev = ib_device_get_netdev(device, port_num);
+       if (!netdev)
+               return -ENODEV;
+
+       dev_put(netdev);
+
+       port_attr->max_mtu = IB_MTU_4096;
+       port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+
+       if (!netif_carrier_ok(netdev)) {
+               port_attr->state = IB_PORT_DOWN;
+               port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+       } else {
+               inetdev = in_dev_get(netdev);
+
+               if (inetdev && inetdev->ifa_list) {
+                       port_attr->state = IB_PORT_ACTIVE;
+                       port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+                       in_dev_put(inetdev);
+               } else {
+                       port_attr->state = IB_PORT_INIT;
+                       port_attr->phys_state =
+                               IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
+               }
+       }
+
+       err = device->ops.query_port(device, port_num, port_attr);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int __ib_query_port(struct ib_device *device,
+                          u8 port_num,
+                          struct ib_port_attr *port_attr)
+{
+       union ib_gid gid = {};
+       int err;
 
        memset(port_attr, 0, sizeof(*port_attr));
+
        err = device->ops.query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;
 
-       if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+       if (rdma_port_get_link_layer(device, port_num) !=
+           IB_LINK_LAYER_INFINIBAND)
                return 0;
 
        err = device->ops.query_gid(device, port_num, 0, &gid);
@@ -2008,6 +2040,28 @@ int ib_query_port(struct ib_device *device,
        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
 }
+
+/**
+ * ib_query_port - Query IB port attributes
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @port_attr:Port attributes
+ *
+ * ib_query_port() returns the attributes of a port through the
+ * @port_attr pointer.
+ */
+int ib_query_port(struct ib_device *device,
+                 u8 port_num,
+                 struct ib_port_attr *port_attr)
+{
+       if (!rdma_is_port_valid(device, port_num))
+               return -EINVAL;
+
+       if (rdma_protocol_iwarp(device, port_num))
+               return iw_query_port(device, port_num, port_attr);
+       else
+               return __ib_query_port(device, port_num, port_attr);
+}
 EXPORT_SYMBOL(ib_query_port);
 
 static void add_ndev_hash(struct ib_port_data *pdata)
@@ -2661,12 +2715,6 @@ static int __init ib_core_init(void)
                goto err_comp_unbound;
        }
 
-       ret = rdma_nl_init();
-       if (ret) {
-               pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
-               goto err_sysfs;
-       }
-
        ret = addr_init();
        if (ret) {
                pr_warn("Could't init IB address resolution\n");
@@ -2712,8 +2760,6 @@ err_mad:
 err_addr:
        addr_cleanup();
 err_ibnl:
-       rdma_nl_exit();
-err_sysfs:
        class_unregister(&ib_class);
 err_comp_unbound:
        destroy_workqueue(ib_comp_unbound_wq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7d841b689a1e575342705bbadbcddbba10420b10..e08aec427027296fe7d1c220722e343e7a87b270 100644 (file)
@@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
-               if (fmr->ref_count !=0) {
-                       pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
-                               fmr, fmr->ref_count);
-               }
-#endif
        }
 
        list_splice_init(&pool->dirty_list, &unmap_list);
@@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
                }
        }
 
-#ifdef DEBUG
-       if (fmr->ref_count < 0)
-               pr_warn(PFX "FMR %p has ref count %d < 0\n",
-                       fmr, fmr->ref_count);
-#endif
-
        spin_unlock_irqrestore(&pool->pool_lock, flags);
 }
 EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 2452b0ddcf0d092ad9121cb8c2348a36d7ec6c1d..f1a873d4e842746f28b2a5bd600eab8b38904729 100644 (file)
@@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
                __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
 
-       ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+       ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@@ -202,7 +202,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
        nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
-       ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+       ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -297,7 +297,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
        nlmsg_end(skb, nlh);
        nlmsg_request->req_buffer = pm_msg;
 
-       ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+       ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                err_str = "Unable to send a nlmsg";
@@ -364,7 +364,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 
        nlmsg_end(skb, nlh);
 
-       ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
+       ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
                iwpm_user_pid = IWPM_PID_UNDEFINED;
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 41929bb83739f8d31c53d7ddbda38643dfd3c8b9..c7ad3499228c1e0a116d1a6ad8bba6f301a84970 100644 (file)
@@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 
        nlmsg_end(skb, nlh);
 
-       ret = rdma_nl_unicast(skb, iwpm_pid);
+       ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
        if (ret) {
                skb = NULL;
                err_str = "Unable to send a nlmsg";
@@ -674,7 +674,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
                return -ENOMEM;
        }
        nlh->nlmsg_type = NLMSG_DONE;
-       ret = rdma_nl_unicast(skb, iwpm_pid);
+       ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
        if (ret)
                pr_warn("%s Unable to send a nlmsg\n", __func__);
        return ret;
@@ -824,7 +824,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
                goto hello_num_error;
        nlmsg_end(skb, nlh);
 
-       ret = rdma_nl_unicast(skb, iwpm_pid);
+       ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
        if (ret) {
                skb = NULL;
                err_str = "Unable to send a nlmsg";
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index eecfc0b377c9876f99f5510122031c896060582a..81dbd5f41beda241c1f815077fcad91a7f019f26 100644 (file)
 #include <linux/export.h>
 #include <net/netlink.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
 #include <linux/module.h>
 #include "core_priv.h"
 
 static DEFINE_MUTEX(rdma_nl_mutex);
-static struct sock *nls;
 static struct {
        const struct rdma_nl_cbs   *cb_table;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
 {
-       return netlink_has_listeners(nls, group);
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+       return netlink_has_listeners(rnet->nl_sock, group);
 }
 EXPORT_SYMBOL(rdma_nl_chk_listeners);
 
@@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
        return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool is_nl_valid(unsigned int type, unsigned int op)
+static bool
+is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
        const struct rdma_nl_cbs *cb_table;
 
        if (!is_nl_msg_valid(type, op))
                return false;
 
+       /*
+        * Currently only NLDEV client is supporting netlink commands in
+        * non init_net net namespace.
+        */
+       if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+               return false;
+
        if (!rdma_nl_types[type].cb_table) {
                mutex_unlock(&rdma_nl_mutex);
                request_module("rdma-netlink-subsys-%d", type);
@@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;
 
-       if (!is_nl_valid(index, op))
+       if (!is_nl_valid(skb, index, op))
                return -EINVAL;
 
        cb_table = rdma_nl_types[index].cb_table;
@@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
-                       return netlink_dump_start(nls, skb, nlh, &c);
+                       return netlink_dump_start(skb->sk, skb, nlh, &c);
                return -EINVAL;
        }
 
@@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb)
        mutex_unlock(&rdma_nl_mutex);
 }
 
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
 {
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
        int err;
 
-       err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+       err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
        return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast);
 
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
 {
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
        int err;
 
-       err = netlink_unicast(nls, skb, pid, 0);
+       err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
        return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast_wait);
 
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+                     unsigned int group, gfp_t flags)
 {
-       return nlmsg_multicast(nls, skb, 0, group, flags);
+       struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+       return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
-int __init rdma_nl_init(void)
+void rdma_nl_exit(void)
+{
+       int idx;
+
+       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+               WARN(rdma_nl_types[idx].cb_table,
+                    "Netlink client %d wasn't released prior to unloading %s\n",
+                    idx, KBUILD_MODNAME);
+}
+
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
 {
+       struct net *net = read_pnet(&rnet->net);
        struct netlink_kernel_cfg cfg = {
                .input  = rdma_nl_rcv,
        };
+       struct sock *nls;
 
-       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
+       nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
        if (!nls)
                return -ENOMEM;
 
        nls->sk_sndtimeo = 10 * HZ;
+       rnet->nl_sock = nls;
        return 0;
 }
 
-void rdma_nl_exit(void)
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
 {
-       int idx;
-
-       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
-               rdma_nl_unregister(idx);
-
-       netlink_kernel_release(nls);
+       netlink_kernel_release(rnet->nl_sock);
 }
 
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 87d40d1ecdde8064ae36b03dce5963c3e9a390bc..cc08218f1ef7ded2bea728656e5cffce0e67e95c 100644 (file)
@@ -832,7 +832,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        nlmsg_end(msg, nlh);
 
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
        nlmsg_free(msg);
@@ -972,7 +972,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        nlmsg_end(msg, nlh);
        ib_device_put(device);
 
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
        nlmsg_free(msg);
@@ -1074,7 +1074,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
        nlmsg_free(msg);
@@ -1251,7 +1251,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_free:
        nlmsg_free(msg);
@@ -1596,7 +1596,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
        put_device(data.cdev);
        if (ibdev)
                ib_device_put(ibdev);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 out_data:
        put_device(data.cdev);
@@ -1636,7 +1636,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                return err;
        }
        nlmsg_end(msg, nlh);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 }
 
 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1734,7 +1734,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
        rdma_counter_unbind_qpn(device, port, qpn, cntn);
@@ -1802,7 +1802,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
        rdma_counter_bind_qpn(device, port, qpn, cntn);
@@ -1893,7 +1893,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
        mutex_unlock(&stats->lock);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_table:
        nla_nest_cancel(msg, table_attr);
@@ -1965,7 +1965,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nlmsg_end(msg, nlh);
        ib_device_put(device);
-       return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+       return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_msg:
        nlmsg_free(msg);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7d8071c7e56428c22f04341881fc02c4cb92106c..17fc2936c077bf1c3cd931aa144d1d778af32c4e 100644 (file)
@@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
 
-       return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
+       return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
 }
 
 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b477295a96c2a6bb2ee47cd4950060f08b92cd1c..7a50cedcef1f60b9b24a2e610dd01c87b59a6c1c 100644 (file)
@@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
                       ib_width_enum_to_int(attr.active_width), speed);
 }
 
+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+       static const char * phys_state_str[] = {
+               "<unknown>",
+               "Sleep",
+               "Polling",
+               "Disabled",
+               "PortConfigurationTraining",
+               "LinkUp",
+               "LinkErrorRecovery",
+               "Phy Test",
+       };
+
+       if (phys_state < ARRAY_SIZE(phys_state_str))
+               return phys_state_str[phys_state];
+       return "<unknown>";
+}
+
 static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
                               char *buf)
 {
@@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
        if (ret)
                return ret;
 
-       switch (attr.phys_state) {
-       case 1:  return sprintf(buf, "1: Sleep\n");
-       case 2:  return sprintf(buf, "2: Polling\n");
-       case 3:  return sprintf(buf, "3: Disabled\n");
-       case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
-       case 5:  return sprintf(buf, "5: LinkUp\n");
-       case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
-       case 7:  return sprintf(buf, "7: Phy Test\n");
-       default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
-       }
+       return sprintf(buf, "%d: %s\n", attr.phys_state,
+                      phys_state_to_str(attr.phys_state));
 }
 
 static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index ffdeaf6e0b686881bfa563ec0af80fca5e5cb4d5..d1407fa378e832bdfe45ba4c4b35645bd59b3a89 100644 (file)
@@ -1042,7 +1042,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
                                ib_unregister_mad_agent(file->agent[i]);
 
        mutex_unlock(&file->port->file_mutex);
-
+       mutex_destroy(&file->mutex);
        kfree(file);
        return 0;
 }
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 11c13c1381cf5c9d6afdb4596c36f8d4a7203c8b..02b57240176c6e56bfc4c2c79ff5f52f65077bee 100644 (file)
@@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device)
 
        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
+       mutex_destroy(&dev->lists_mutex);
+       mutex_destroy(&dev->xrcd_tree_mutex);
        kfree(dev);
 }
 
@@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref)
 
        if (file->disassociate_page)
                __free_pages(file->disassociate_page, 0);
+       mutex_destroy(&file->umap_lock);
+       mutex_destroy(&file->ucontext_lock);
        kfree(file);
 }
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 92349bf37589f79d4fa6a589882c6de5c5c21aa0..f974b68542245206936e623a6d62d6ef948cecce 100644 (file)
@@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
                if (ret)
                        return ret;
        }
+       mutex_destroy(&xrcd->tgt_qp_mutex);
 
        return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 098ab883733eeef71243852940567788eca3cbc2..f9e97d0cc459ced27d7f4191374a6fa759f52f05 100644 (file)
@@ -220,10 +220,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 
        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
-               port_attr->phys_state = 5;
+               port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                port_attr->state = IB_PORT_DOWN;
-               port_attr->phys_state = 3;
+               port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 029babe713f3d6e002475fc89c7cbe5689dd5cb1..30a54f8aa42c0f77fc2fedc3b77f2963a97522ff 100644 (file)
@@ -1473,7 +1473,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
        bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
-       bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
 
        return 0;
 free_sctx:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e775c1a1a4506c147dfe369b2edcd7bc8cef68a5..dcf02ec02810c8646cf26cb73798d4079c68e62a 100644 (file)
@@ -991,33 +991,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 static int iwch_query_port(struct ib_device *ibdev,
                           u8 port, struct ib_port_attr *props)
 {
-       struct iwch_dev *dev;
-       struct net_device *netdev;
-       struct in_device *inetdev;
-
        pr_debug("%s ibdev %p\n", __func__, ibdev);
 
-       dev = to_iwch_dev(ibdev);
-       netdev = dev->rdev.port_info.lldevs[port-1];
-
-       /* props being zeroed by the caller, avoid zeroing it here */
-       props->max_mtu = IB_MTU_4096;
-       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
-       if (!netif_carrier_ok(netdev))
-               props->state = IB_PORT_DOWN;
-       else {
-               inetdev = in_dev_get(netdev);
-               if (inetdev) {
-                       if (inetdev->ifa_list)
-                               props->state = IB_PORT_ACTIVE;
-                       else
-                               props->state = IB_PORT_INIT;
-                       in_dev_put(inetdev);
-               } else
-                       props->state = IB_PORT_INIT;
-       }
-
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
@@ -1273,8 +1248,24 @@ static const struct ib_device_ops iwch_dev_ops = {
        INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
 };
 
+static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < rdev->port_info.nports; i++) {
+               ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
+                                          i + 1);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
 int iwch_register_device(struct iwch_dev *dev)
 {
+       int err;
+
        pr_debug("%s iwch_dev %p\n", __func__, dev);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
@@ -1315,6 +1306,10 @@ int iwch_register_device(struct iwch_dev *dev)
 
        rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
        ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
+       err = set_netdevs(&dev->ibdev, &dev->rdev);
+       if (err)
+               return err;
+
        return ib_register_device(&dev->ibdev, "cxgb3_%d");
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 5e59c570872989b7941cc2ce3906e5ba95b08a16..d373ac0fe2cb6a6144411d8bc9edfae696da8207 100644 (file)
@@ -305,32 +305,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
 {
-       struct c4iw_dev *dev;
-       struct net_device *netdev;
-       struct in_device *inetdev;
-
        pr_debug("ibdev %p\n", ibdev);
 
-       dev = to_c4iw_dev(ibdev);
-       netdev = dev->rdev.lldi.ports[port-1];
-       /* props being zeroed by the caller, avoid zeroing it here */
-       props->max_mtu = IB_MTU_4096;
-       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
-       if (!netif_carrier_ok(netdev))
-               props->state = IB_PORT_DOWN;
-       else {
-               inetdev = in_dev_get(netdev);
-               if (inetdev) {
-                       if (inetdev->ifa_list)
-                               props->state = IB_PORT_ACTIVE;
-                       else
-                               props->state = IB_PORT_INIT;
-                       in_dev_put(inetdev);
-               } else
-                       props->state = IB_PORT_INIT;
-       }
-
        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 119f8efec56474340bb6a7cb9f13fee456e4e67d..2283e432693ea0771804915c2c25c8eae4298ffc 100644 (file)
@@ -156,5 +156,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                  int qp_attr_mask, struct ib_udata *udata);
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
                                         u8 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+                    u8 port_num, int index);
 
 #endif /* _EFA_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 2cb42484b0f8e6a7d8ab23642df47d261a0c7bd6..3c412bc5b94f1c4c98f03e6556b0188e86c6125b 100644 (file)
@@ -109,17 +109,19 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
        } while (time_is_after_jiffies(exp_time));
 
        if (read_resp->req_id != mmio_read->seq_num) {
-               ibdev_err(edev->efa_dev,
-                         "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
-                         mmio_read->seq_num, offset, read_resp->req_id,
-                         read_resp->reg_off);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
+                       mmio_read->seq_num, offset, read_resp->req_id,
+                       read_resp->reg_off);
                err = EFA_MMIO_READ_INVALID;
                goto out;
        }
 
        if (read_resp->reg_off != offset) {
-               ibdev_err(edev->efa_dev,
-                         "Reading register failed: wrong offset provided\n");
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Reading register failed: wrong offset provided\n");
                err = EFA_MMIO_READ_INVALID;
                goto out;
        }
@@ -293,9 +295,10 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
        u16 ctx_id = cmd_id & (aq->depth - 1);
 
        if (aq->comp_ctx[ctx_id].occupied && capture) {
-               ibdev_err(aq->efa_dev,
-                         "Completion context for command_id %#x is occupied\n",
-                         cmd_id);
+               ibdev_err_ratelimited(
+                       aq->efa_dev,
+                       "Completion context for command_id %#x is occupied\n",
+                       cmd_id);
                return NULL;
        }
 
@@ -401,7 +404,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
 
        spin_lock(&aq->sq.lock);
        if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
-               ibdev_err(aq->efa_dev, "Admin queue is closed\n");
+               ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
                spin_unlock(&aq->sq.lock);
                return ERR_PTR(-ENODEV);
        }
@@ -519,8 +522,9 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c
                        break;
 
                if (time_is_before_jiffies(timeout)) {
-                       ibdev_err(aq->efa_dev,
-                                 "Wait for completion (polling) timeout\n");
+                       ibdev_err_ratelimited(
+                               aq->efa_dev,
+                               "Wait for completion (polling) timeout\n");
                        /* EFA didn't have any completion */
                        atomic64_inc(&aq->stats.no_completion);
 
@@ -561,17 +565,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
                atomic64_inc(&aq->stats.no_completion);
 
                if (comp_ctx->status == EFA_CMD_COMPLETED)
-                       ibdev_err(aq->efa_dev,
-                                 "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
-                                 efa_com_cmd_str(comp_ctx->cmd_opcode),
-                                 comp_ctx->cmd_opcode, comp_ctx->status,
-                                 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+                       ibdev_err_ratelimited(
+                               aq->efa_dev,
+                               "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+                               efa_com_cmd_str(comp_ctx->cmd_opcode),
+                               comp_ctx->cmd_opcode, comp_ctx->status,
+                               comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
                else
-                       ibdev_err(aq->efa_dev,
-                                 "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
-                                 efa_com_cmd_str(comp_ctx->cmd_opcode),
-                                 comp_ctx->cmd_opcode, comp_ctx->status,
-                                 comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+                       ibdev_err_ratelimited(
+                               aq->efa_dev,
+                               "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+                               efa_com_cmd_str(comp_ctx->cmd_opcode),
+                               comp_ctx->cmd_opcode, comp_ctx->status,
+                               comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
 
                clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
                err = -ETIME;
@@ -633,10 +639,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
                  cmd->aq_common_descriptor.opcode);
        comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
        if (IS_ERR(comp_ctx)) {
-               ibdev_err(aq->efa_dev,
-                         "Failed to submit command %s (opcode %u) err %ld\n",
-                         efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
-                         cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+               ibdev_err_ratelimited(
+                       aq->efa_dev,
+                       "Failed to submit command %s (opcode %u) err %ld\n",
+                       efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+                       cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
 
                up(&aq->avail_cmds);
                return PTR_ERR(comp_ctx);
@@ -644,11 +651,12 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
 
        err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
        if (err)
-               ibdev_err(aq->efa_dev,
-                         "Failed to process command %s (opcode %u) comp_status %d err %d\n",
-                         efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
-                         cmd->aq_common_descriptor.opcode,
-                         comp_ctx->comp_status, err);
+               ibdev_err_ratelimited(
+                       aq->efa_dev,
+                       "Failed to process command %s (opcode %u) comp_status %d err %d\n",
+                       efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
+                       cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
+                       err);
 
        up(&aq->avail_cmds);
 
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 62345d8abf3ca20bde1ffbe79a1ab8253fb2a81c..501dce89f2758de4879dae58d1047e33cb15b28d 100644 (file)
@@ -44,7 +44,8 @@ int efa_com_create_qp(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to create qp [%d]\n", err);
                return err;
        }
 
@@ -82,9 +83,10 @@ int efa_com_modify_qp(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
-                         cmd.qp_handle, cmd.modify_mask, err);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
+                       cmd.qp_handle, cmd.modify_mask, err);
                return err;
        }
 
@@ -109,8 +111,9 @@ int efa_com_query_qp(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n",
-                         cmd.qp_handle, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to query qp-%u [%d]\n",
+                                     cmd.qp_handle, err);
                return err;
        }
 
@@ -139,8 +142,9 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
-                         qp_cmd.qp_handle, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to destroy qp-%u [%d]\n",
+                                     qp_cmd.qp_handle, err);
                return err;
        }
 
@@ -173,7 +177,8 @@ int efa_com_create_cq(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to create cq[%d]\n", err);
                return err;
        }
 
@@ -201,8 +206,9 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
                               sizeof(destroy_resp));
 
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
-                         params->cq_idx, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to destroy CQ-%u [%d]\n",
+                                     params->cq_idx, err);
                return err;
        }
 
@@ -250,7 +256,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to register mr [%d]\n", err);
                return err;
        }
 
@@ -277,9 +284,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to de-register mr(lkey-%u) [%d]\n",
-                         mr_cmd.l_key, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to de-register mr(lkey-%u) [%d]\n",
+                                     mr_cmd.l_key, err);
                return err;
        }
 
@@ -306,8 +313,9 @@ int efa_com_create_ah(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to create ah for %pI6 [%d]\n",
-                         ah_cmd.dest_addr, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to create ah for %pI6 [%d]\n",
+                                     ah_cmd.dest_addr, err);
                return err;
        }
 
@@ -334,8 +342,9 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&cmd_completion,
                               sizeof(cmd_completion));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
-                         ah_cmd.ah, ah_cmd.pd, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to destroy ah-%d pd-%d [%d]\n",
+                                     ah_cmd.ah, ah_cmd.pd, err);
                return err;
        }
 
@@ -367,8 +376,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
        int err;
 
        if (!efa_com_check_supported_feature_id(edev, feature_id)) {
-               ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
-                         feature_id);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Feature %d isn't supported\n",
+                                     feature_id);
                return -EOPNOTSUPP;
        }
 
@@ -396,9 +406,10 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
                               sizeof(*get_resp));
 
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to submit get_feature command %d [%d]\n",
-                         feature_id, err);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Failed to submit get_feature command %d [%d]\n",
+                       feature_id, err);
                return err;
        }
 
@@ -421,8 +432,9 @@ int efa_com_get_network_attr(struct efa_com_dev *edev,
        err = efa_com_get_feature(edev, &resp,
                                  EFA_ADMIN_NETWORK_ATTR);
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to get network attributes %d\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to get network attributes %d\n",
+                                     err);
                return err;
        }
 
@@ -441,8 +453,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
 
        err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n",
-                         err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to get device attributes %d\n",
+                                     err);
                return err;
        }
 
@@ -456,9 +469,10 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
        result->db_bar = resp.u.device_attr.db_bar;
 
        if (result->admin_api_version < 1) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to get device attr api version [%u < 1]\n",
-                         result->admin_api_version);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Failed to get device attr api version [%u < 1]\n",
+                       result->admin_api_version);
                return -EINVAL;
        }
 
@@ -466,8 +480,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
        err = efa_com_get_feature(edev, &resp,
                                  EFA_ADMIN_QUEUE_ATTR);
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to get network attributes %d\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to get network attributes %d\n",
+                                     err);
                return err;
        }
 
@@ -497,7 +512,8 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
 
        err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to get hw hints %d\n", err);
                return err;
        }
 
@@ -520,8 +536,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
        int err;
 
        if (!efa_com_check_supported_feature_id(edev, feature_id)) {
-               ibdev_err(edev->efa_dev, "Feature %d isn't supported\n",
-                         feature_id);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Feature %d isn't supported\n",
+                                     feature_id);
                return -EOPNOTSUPP;
        }
 
@@ -545,9 +562,10 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
                               sizeof(*set_resp));
 
        if (err) {
-               ibdev_err(edev->efa_dev,
-                         "Failed to submit set_feature command %d error: %d\n",
-                         feature_id, err);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Failed to submit set_feature command %d error: %d\n",
+                       feature_id, err);
                return err;
        }
 
@@ -574,8 +592,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
 
        err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n",
-                         err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to get aenq attributes: %d\n",
+                                     err);
                return err;
        }
 
@@ -585,9 +604,10 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
                  get_resp.u.aenq.enabled_groups);
 
        if ((get_resp.u.aenq.supported_groups & groups) != groups) {
-               ibdev_err(edev->efa_dev,
-                         "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
-                         groups, get_resp.u.aenq.supported_groups);
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
+                       groups, get_resp.u.aenq.supported_groups);
                return -EOPNOTSUPP;
        }
 
@@ -595,8 +615,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
        err = efa_com_set_feature(edev, &set_resp, &cmd,
                                  EFA_ADMIN_AENQ_CONFIG);
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n",
-                         err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to set aenq attributes: %d\n",
+                                     err);
                return err;
        }
 
@@ -619,7 +640,8 @@ int efa_com_alloc_pd(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to allocate pd[%d]\n", err);
                return err;
        }
 
@@ -645,8 +667,9 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n",
-                         cmd.pd, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to deallocate pd-%u [%d]\n",
+                                     cmd.pd, err);
                return err;
        }
 
@@ -669,7 +692,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to allocate uar[%d]\n", err);
                return err;
        }
 
@@ -695,10 +719,47 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev,
                               (struct efa_admin_acq_entry *)&resp,
                               sizeof(resp));
        if (err) {
-               ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n",
-                         cmd.uar, err);
+               ibdev_err_ratelimited(edev->efa_dev,
+                                     "Failed to deallocate uar-%u [%d]\n",
+                                     cmd.uar, err);
                return err;
        }
 
        return 0;
 }
+
+int efa_com_get_stats(struct efa_com_dev *edev,
+                     struct efa_com_get_stats_params *params,
+                     union efa_com_get_stats_result *result)
+{
+       struct efa_com_admin_queue *aq = &edev->aq;
+       struct efa_admin_aq_get_stats_cmd cmd = {};
+       struct efa_admin_acq_get_stats_resp resp;
+       int err;
+
+       cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
+       cmd.type = params->type;
+       cmd.scope = params->scope;
+       cmd.scope_modifier = params->scope_modifier;
+
+       err = efa_com_cmd_exec(aq,
+                              (struct efa_admin_aq_entry *)&cmd,
+                              sizeof(cmd),
+                              (struct efa_admin_acq_entry *)&resp,
+                              sizeof(resp));
+       if (err) {
+               ibdev_err_ratelimited(
+                       edev->efa_dev,
+                       "Failed to get stats type-%u scope-%u.%u [%d]\n",
+                       cmd.type, cmd.scope, cmd.scope_modifier, err);
+               return err;
+       }
+
+       result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes;
+       result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts;
+       result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes;
+       result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts;
+       result->basic_stats.rx_drops = resp.basic_stats.rx_drops;
+
+       return 0;
+}
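
The hunks above convert the admin-path error prints in this file from ibdev_err() to ibdev_err_ratelimited(), presumably so repeated admin-command failures cannot flood the kernel log. As a rough, hypothetical illustration of windowed rate limiting (a userspace sketch, not the kernel's implementation):

/* Hypothetical sketch of windowed log rate limiting; all names are made up. */
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t interval;	/* length of one window, in seconds */
	int burst;		/* messages allowed per window */
	time_t begin;		/* start of the current window */
	int printed;		/* messages emitted in this window */
};

static int ratelimit_allow(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->begin >= rl->interval) {
		rl->begin = now;	/* open a new window */
		rl->printed = 0;
	}
	if (rl->printed >= rl->burst)
		return 0;		/* suppress this message */
	rl->printed++;
	return 1;
}

int main(void)
{
	struct ratelimit rl = { .interval = 5, .burst = 3 };
	int i;

	for (i = 0; i < 10; i++)
		if (ratelimit_allow(&rl))
			printf("admin command failed (%d)\n", i);
	return 0;
}

Only the first three messages of the burst get through; the rest are silently dropped until the window rolls over.
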
index a1174380462c15569e8d63b7c3a89d5db60cb3c0..7f6c13052f497699b5086ce6c8c6eebd4aa3ba07 100644
@@ -225,6 +225,26 @@ struct efa_com_dealloc_uar_params {
        u16 uarn;
 };
 
+struct efa_com_get_stats_params {
+       /* see enum efa_admin_get_stats_type */
+       u8 type;
+       /* see enum efa_admin_get_stats_scope */
+       u8 scope;
+       u16 scope_modifier;
+};
+
+struct efa_com_basic_stats {
+       u64 tx_bytes;
+       u64 tx_pkts;
+       u64 rx_bytes;
+       u64 rx_pkts;
+       u64 rx_drops;
+};
+
+union efa_com_get_stats_result {
+       struct efa_com_basic_stats basic_stats;
+};
+
 void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
 int efa_com_create_qp(struct efa_com_dev *edev,
                      struct efa_com_create_qp_params *params,
@@ -266,5 +286,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
                      struct efa_com_alloc_uar_result *result);
 int efa_com_dealloc_uar(struct efa_com_dev *edev,
                        struct efa_com_dealloc_uar_params *params);
+int efa_com_get_stats(struct efa_com_dev *edev,
+                     struct efa_com_get_stats_params *params,
+                     union efa_com_get_stats_result *result);
 
 #endif /* _EFA_COM_CMD_H_ */
index dd1c6d49466f5837fa4c33f1cf368e7cfdc77277..83858f7e83d0f2c2d8dd016d08f514d7af376783 100644
@@ -201,6 +201,7 @@ static const struct ib_device_ops efa_dev_ops = {
        .driver_id = RDMA_DRIVER_EFA,
        .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
 
+       .alloc_hw_stats = efa_alloc_hw_stats,
        .alloc_pd = efa_alloc_pd,
        .alloc_ucontext = efa_alloc_ucontext,
        .create_ah = efa_create_ah,
@@ -212,6 +213,7 @@ static const struct ib_device_ops efa_dev_ops = {
        .destroy_ah = efa_destroy_ah,
        .destroy_cq = efa_destroy_cq,
        .destroy_qp = efa_destroy_qp,
+       .get_hw_stats = efa_get_hw_stats,
        .get_link_layer = efa_port_link_layer,
        .get_port_immutable = efa_get_port_immutable,
        .mmap = efa_mmap,
index df77bc312a25388bee6e84c95f317daf542352cb..70851bd7f8019ddfff71ebde4cd5ba257512eb28 100644
@@ -41,6 +41,33 @@ static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
               ((u64)efa->mmap_page << PAGE_SHIFT);
 }
 
+#define EFA_DEFINE_STATS(op) \
+       op(EFA_TX_BYTES, "tx_bytes") \
+       op(EFA_TX_PKTS, "tx_pkts") \
+       op(EFA_RX_BYTES, "rx_bytes") \
+       op(EFA_RX_PKTS, "rx_pkts") \
+       op(EFA_RX_DROPS, "rx_drops") \
+       op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
+       op(EFA_COMPLETED_CMDS, "completed_cmds") \
+       op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
+       op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
+       op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
+       op(EFA_CREATE_QP_ERR, "create_qp_err") \
+       op(EFA_REG_MR_ERR, "reg_mr_err") \
+       op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
+       op(EFA_CREATE_AH_ERR, "create_ah_err")
+
+#define EFA_STATS_ENUM(ename, name) ename,
+#define EFA_STATS_STR(ename, name) [ename] = name,
+
+enum efa_hw_stats {
+       EFA_DEFINE_STATS(EFA_STATS_ENUM)
+};
+
+static const char *const efa_stats_names[] = {
+       EFA_DEFINE_STATS(EFA_STATS_STR)
+};
+
 #define EFA_CHUNK_PAYLOAD_SHIFT       12
 #define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
 #define EFA_CHUNK_PAYLOAD_PTR_SIZE    8
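
EFA_DEFINE_STATS above is an X-macro: the counter list is written once and expanded twice, first into the efa_hw_stats enum and then into the efa_stats_names table, so the enum values and their strings cannot drift apart. A minimal standalone sketch of the same pattern, with made-up counter names:

/* X-macro sketch: one list, two expansions (hypothetical counters). */
#include <stdio.h>

#define DEFINE_STATS(op)		\
	op(STAT_TX_PKTS, "tx_pkts")	\
	op(STAT_RX_PKTS, "rx_pkts")	\
	op(STAT_RX_DROPS, "rx_drops")

#define STATS_ENUM(ename, name) ename,
#define STATS_STR(ename, name)  [ename] = name,

enum stats {
	DEFINE_STATS(STATS_ENUM)
	STAT_MAX
};

static const char *const stats_names[] = {
	DEFINE_STATS(STATS_STR)
};

int main(void)
{
	int i;

	for (i = 0; i < STAT_MAX; i++)
		printf("%d: %s\n", i, stats_names[i]);
	return 0;
}

Adding a counter is then a one-line change to the list; the enum and the name table pick it up automatically.
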
@@ -306,7 +333,7 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
        props->lmc = 1;
 
        props->state = IB_PORT_ACTIVE;
-       props->phys_state = 5;
+       props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_speed = IB_SPEED_EDR;
@@ -1727,6 +1754,54 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
        efa_ah_destroy(dev, ah);
 }
 
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
+{
+       return rdma_alloc_hw_stats_struct(efa_stats_names,
+                                         ARRAY_SIZE(efa_stats_names),
+                                         RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+                    u8 port_num, int index)
+{
+       struct efa_com_get_stats_params params = {};
+       union efa_com_get_stats_result result;
+       struct efa_dev *dev = to_edev(ibdev);
+       struct efa_com_basic_stats *bs;
+       struct efa_com_stats_admin *as;
+       struct efa_stats *s;
+       int err;
+
+       params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
+       params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
+
+       err = efa_com_get_stats(&dev->edev, &params, &result);
+       if (err)
+               return err;
+
+       bs = &result.basic_stats;
+       stats->value[EFA_TX_BYTES] = bs->tx_bytes;
+       stats->value[EFA_TX_PKTS] = bs->tx_pkts;
+       stats->value[EFA_RX_BYTES] = bs->rx_bytes;
+       stats->value[EFA_RX_PKTS] = bs->rx_pkts;
+       stats->value[EFA_RX_DROPS] = bs->rx_drops;
+
+       as = &dev->edev.aq.stats;
+       stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
+       stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+       stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
+
+       s = &dev->stats;
+       stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
+       stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
+       stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
+       stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
+       stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
+       stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
+
+       return ARRAY_SIZE(efa_stats_names);
+}
+
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
                                         u8 port_num)
 {
index 4d8510b0fc3816b7f1836b9744c7a5967118cdc3..9972e0e6545e8a56fa3b6fdaea6d0a84d8b4361f 100644
@@ -110,12 +110,6 @@ enum pkt_q_sdma_state {
        SDMA_PKT_Q_DEFERRED,
 };
 
-/*
- * Maximum retry attempts to submit a TX request
- * before putting the process to sleep.
- */
-#define MAX_DEFER_RETRY_COUNT 1
-
 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
 
 #define SDMA_DBG(req, fmt, ...)                                     \
index 646f61545ed6be6b76998f0700d2185df22a81e5..9f53f63b1453647c053342c1dc37e1e0526a55b7 100644
@@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        else
                                pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
 
-                       if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
-                               pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
                        pbc = create_pbc(ppd,
                                         pbc,
                                         qp->srate_mbps,
                                         vl,
                                         plen);
 
-                       /* Update HCRC based on packet opcode */
-                       pbc = update_hcrc(ps->opcode, pbc);
+                       if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+                               pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+                       else
+                               /* Update HCRC based on packet opcode */
+                               pbc = update_hcrc(ps->opcode, pbc);
                }
                tx->wqe = qp->s_wqe;
                ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
@@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                else
                        pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
 
+               pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
                if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
                        pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
-               pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
-
-               /* Update HCRC based on packet opcode */
-               pbc = update_hcrc(ps->opcode, pbc);
+               else
+                       /* Update HCRC based on packet opcode */
+                       pbc = update_hcrc(ps->opcode, pbc);
        }
        if (cb)
                iowait_pio_inc(&priv->s_iowait);
index 54782197c7172da17763fc5a2d747af31c4df4f5..d602b698b57eb06517a2fe338f4531649e7ae5fb 100644
@@ -8,8 +8,6 @@ config INFINIBAND_HNS
          is used in Hisilicon Hip06 and more further ICT SoC based on
          platform device.
 
-         To compile HIP06 or HIP08 driver as module, choose M here.
-
 config INFINIBAND_HNS_HIP06
        tristate "Hisilicon Hip06 Family RoCE support"
        depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
@@ -17,15 +15,9 @@ config INFINIBAND_HNS_HIP06
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
          Hip07 SoC. These RoCE engines are platform devices.
 
-         To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
-         module will be called hns-roce-hw-v1
-
 config INFINIBAND_HNS_HIP08
        tristate "Hisilicon Hip08 Family RoCE support"
        depends on INFINIBAND_HNS && PCI && HNS3
        ---help---
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
          The RoCE engine is a PCI device.
-
-         To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
-         module will be called hns-roce-hw-v2.
index 0cd09bf4d7eaeb64c1df5229ccd43950bb6859ad..ade26faade8dc505d1bcfc3b14db399956c93413 100644
@@ -211,7 +211,6 @@ int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
        mutex_init(&hr_dev->cmd.hcr_mutex);
        sema_init(&hr_dev->cmd.poll_sem, 1);
        hr_dev->cmd.use_events = 0;
-       hr_dev->cmd.toggle = 1;
        hr_dev->cmd.max_cmds = CMD_MAX_NUM;
        hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
                                           HNS_ROCE_MAILBOX_SIZE,
index 4e50c22a2da443e17a70697c92260d7518662a98..22541d19cd093a1700f820c6b5ec275131326a9a 100644
@@ -83,7 +83,6 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
 
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                             struct hns_roce_mtt *hr_mtt,
-                            struct hns_roce_uar *hr_uar,
                             struct hns_roce_cq *hr_cq, int vector)
 {
        struct hns_roce_cmd_mailbox *mailbox;
@@ -154,7 +153,6 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 
        hr_cq->cons_index = 0;
        hr_cq->arm_sn = 1;
-       hr_cq->uar = hr_uar;
 
        atomic_set(&hr_cq->refcount, 1);
        init_completion(&hr_cq->free);
@@ -298,21 +296,127 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
                          &buf->hr_buf);
 }
 
+static int create_user_cq(struct hns_roce_dev *hr_dev,
+                         struct hns_roce_cq *hr_cq,
+                         struct ib_udata *udata,
+                         struct hns_roce_ib_create_cq_resp *resp,
+                         int cq_entries)
+{
+       struct hns_roce_ib_create_cq ucmd;
+       struct device *dev = hr_dev->dev;
+       int ret;
+       struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+                                  udata, struct hns_roce_ucontext, ibucontext);
+
+       if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+               dev_err(dev, "Failed to copy_from_udata.\n");
+               return -EFAULT;
+       }
+
+       /* Get user space address, write it into mtt table */
+       ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
+                                     &hr_cq->umem, ucmd.buf_addr,
+                                     cq_entries);
+       if (ret) {
+               dev_err(dev, "Failed to get_cq_umem.\n");
+               return ret;
+       }
+
+       if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+           (udata->outlen >= sizeof(*resp))) {
+               ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
+                                          &hr_cq->db);
+               if (ret) {
+                       dev_err(dev, "cq record doorbell map failed!\n");
+                       goto err_mtt;
+               }
+               hr_cq->db_en = 1;
+               resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
+       }
+
+       return 0;
+
+err_mtt:
+       hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+       ib_umem_release(hr_cq->umem);
+
+       return ret;
+}
+
+static int create_kernel_cq(struct hns_roce_dev *hr_dev,
+                           struct hns_roce_cq *hr_cq, int cq_entries)
+{
+       struct device *dev = hr_dev->dev;
+       struct hns_roce_uar *uar;
+       int ret;
+
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+               ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+               if (ret)
+                       return ret;
+
+               hr_cq->set_ci_db = hr_cq->db.db_record;
+               *hr_cq->set_ci_db = 0;
+               hr_cq->db_en = 1;
+       }
+
+       /* Init mtt table and write buff address to mtt table */
+       ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+       if (ret) {
+               dev_err(dev, "Failed to alloc_cq_buf.\n");
+               goto err_db;
+       }
+
+       uar = &hr_dev->priv_uar;
+       hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+                        DB_REG_OFFSET * uar->index;
+
+       return 0;
+
+err_db:
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+               hns_roce_free_db(hr_dev, &hr_cq->db);
+
+       return ret;
+}
+
+static void destroy_user_cq(struct hns_roce_dev *hr_dev,
+                           struct hns_roce_cq *hr_cq,
+                           struct ib_udata *udata,
+                           struct hns_roce_ib_create_cq_resp *resp)
+{
+       struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
+                                  udata, struct hns_roce_ucontext, ibucontext);
+
+       if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+           (udata->outlen >= sizeof(*resp)))
+               hns_roce_db_unmap_user(context, &hr_cq->db);
+
+       hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+       ib_umem_release(hr_cq->umem);
+}
+
+static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
+                             struct hns_roce_cq *hr_cq)
+{
+       hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+       hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+
+       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+               hns_roce_free_db(hr_dev, &hr_cq->db);
+}
+
 int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
                          const struct ib_cq_init_attr *attr,
                          struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct device *dev = hr_dev->dev;
-       struct hns_roce_ib_create_cq ucmd;
        struct hns_roce_ib_create_cq_resp resp = {};
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
-       struct hns_roce_uar *uar = NULL;
        int vector = attr->comp_vector;
        int cq_entries = attr->cqe;
        int ret;
-       struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
-               udata, struct hns_roce_ucontext, ibucontext);
 
        if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
                dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -328,61 +432,21 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
        spin_lock_init(&hr_cq->lock);
 
        if (udata) {
-               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
-                       dev_err(dev, "Failed to copy_from_udata.\n");
-                       ret = -EFAULT;
-                       goto err_cq;
-               }
-
-               /* Get user space address, write it into mtt table */
-               ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
-                                             &hr_cq->umem, ucmd.buf_addr,
-                                             cq_entries);
+               ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
                if (ret) {
-                       dev_err(dev, "Failed to get_cq_umem.\n");
+                       dev_err(dev, "Create cq failed in user mode!\n");
                        goto err_cq;
                }
-
-               if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
-                   (udata->outlen >= sizeof(resp))) {
-                       ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
-                                                  &hr_cq->db);
-                       if (ret) {
-                               dev_err(dev, "cq record doorbell map failed!\n");
-                               goto err_mtt;
-                       }
-                       hr_cq->db_en = 1;
-                       resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
-               }
-
-               /* Get user space parameters */
-               uar = &context->uar;
        } else {
-               if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
-                       ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
-                       if (ret)
-                               goto err_cq;
-
-                       hr_cq->set_ci_db = hr_cq->db.db_record;
-                       *hr_cq->set_ci_db = 0;
-                       hr_cq->db_en = 1;
-               }
-
-               /* Init mmt table and write buff address to mtt table */
-               ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
-                                              cq_entries);
+               ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
                if (ret) {
-                       dev_err(dev, "Failed to alloc_cq_buf.\n");
-                       goto err_db;
+                       dev_err(dev, "Create cq failed in kernel mode!\n");
+                       goto err_cq;
                }
-
-               uar = &hr_dev->priv_uar;
-               hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
-                               DB_REG_OFFSET * uar->index;
        }
 
        /* Allocate cq index, fill cq_context */
-       ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+       ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
                                hr_cq, vector);
        if (ret) {
                dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
@@ -416,20 +480,10 @@ err_cqc:
        hns_roce_free_cq(hr_dev, hr_cq);
 
 err_dbmap:
-       if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
-           (udata->outlen >= sizeof(resp)))
-               hns_roce_db_unmap_user(context, &hr_cq->db);
-
-err_mtt:
-       hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-       ib_umem_release(hr_cq->umem);
-       if (!udata)
-               hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
-                                       hr_cq->ib_cq.cqe);
-
-err_db:
-       if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
-               hns_roce_free_db(hr_dev, &hr_cq->db);
+       if (udata)
+               destroy_user_cq(hr_dev, hr_cq, udata, &resp);
+       else
+               destroy_kernel_cq(hr_dev, hr_cq);
 
 err_cq:
        return ret;
index a548b28aab639ae09536982c3f875acf4b9faefa..c7bf738fb1373cba3fd6ee74e7931d1c8e810c18 100644
@@ -84,7 +84,6 @@
 #define HNS_ROCE_CEQ_ENTRY_SIZE                        0x4
 #define HNS_ROCE_AEQ_ENTRY_SIZE                        0x10
 
-/* 4G/4K = 1M */
 #define HNS_ROCE_SL_SHIFT                      28
 #define HNS_ROCE_TCLASS_SHIFT                  20
 #define HNS_ROCE_FLOW_LABEL_MASK               0xfffff
@@ -322,7 +321,7 @@ struct hns_roce_hem_table {
        unsigned long   num_hem;
        /* HEM entry record obj total num */
        unsigned long   num_obj;
-       /*Single obj size */
+       /* Single obj size */
        unsigned long   obj_size;
        unsigned long   table_chunk_size;
        int             lowmem;
@@ -343,7 +342,7 @@ struct hns_roce_mtt {
 
 struct hns_roce_buf_region {
        int offset; /* page offset */
-       u32 count; /* page count*/
+       u32 count; /* page count */
        int hopnum; /* addressing hop num */
 };
 
@@ -384,25 +383,25 @@ struct hns_roce_mr {
        u64                     size; /* Address range of MR */
        u32                     key; /* Key of MR */
        u32                     pd;   /* PD num of MR */
-       u32                     access;/* Access permission of MR */
+       u32                     access; /* Access permission of MR */
        u32                     npages;
        int                     enabled; /* MR's active status */
        int                     type;   /* MR's register type */
-       u64                     *pbl_buf;/* MR's PBL space */
+       u64                     *pbl_buf;       /* MR's PBL space */
        dma_addr_t              pbl_dma_addr;   /* MR's PBL space PA */
-       u32                     pbl_size;/* PA number in the PBL */
-       u64                     pbl_ba;/* page table address */
-       u32                     l0_chunk_last_num;/* L0 last number */
-       u32                     l1_chunk_last_num;/* L1 last number */
-       u64                     **pbl_bt_l2;/* PBL BT L2 */
-       u64                     **pbl_bt_l1;/* PBL BT L1 */
-       u64                     *pbl_bt_l0;/* PBL BT L0 */
-       dma_addr_t              *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
-       dma_addr_t              *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
-       dma_addr_t              pbl_l0_dma_addr;/* PBL BT L0 dma addr */
-       u32                     pbl_ba_pg_sz;/* BT chunk page size */
-       u32                     pbl_buf_pg_sz;/* buf chunk page size */
-       u32                     pbl_hop_num;/* multi-hop number */
+       u32                     pbl_size;       /* PA number in the PBL */
+       u64                     pbl_ba;         /* page table address */
+       u32                     l0_chunk_last_num;      /* L0 last number */
+       u32                     l1_chunk_last_num;      /* L1 last number */
+       u64                     **pbl_bt_l2;    /* PBL BT L2 */
+       u64                     **pbl_bt_l1;    /* PBL BT L1 */
+       u64                     *pbl_bt_l0;     /* PBL BT L0 */
+       dma_addr_t              *pbl_l2_dma_addr;       /* PBL BT L2 dma addr */
+       dma_addr_t              *pbl_l1_dma_addr;       /* PBL BT L1 dma addr */
+       dma_addr_t              pbl_l0_dma_addr;        /* PBL BT L0 dma addr */
+       u32                     pbl_ba_pg_sz;   /* BT chunk page size */
+       u32                     pbl_buf_pg_sz;  /* buf chunk page size */
+       u32                     pbl_hop_num;    /* multi-hop number */
 };
 
 struct hns_roce_mr_table {
@@ -425,16 +424,16 @@ struct hns_roce_wq {
        u32             max_post;
        int             max_gs;
        int             offset;
-       int             wqe_shift;/* WQE size */
+       int             wqe_shift;      /* WQE size */
        u32             head;
        u32             tail;
        void __iomem    *db_reg_l;
 };
 
 struct hns_roce_sge {
-       int             sge_cnt;  /* SGE num */
+       int             sge_cnt;        /* SGE num */
        int             offset;
-       int             sge_shift;/* SGE size */
+       int             sge_shift;      /* SGE size */
 };
 
 struct hns_roce_buf_list {
@@ -618,7 +617,6 @@ struct hns_roce_cmdq {
         * close device, switch into poll mode(non event mode)
         */
        u8                      use_events;
-       u8                      toggle;
 };
 
 struct hns_roce_cmd_mailbox {
@@ -654,8 +652,6 @@ struct hns_roce_qp {
        u32                     doorbell_qpn;
        __le32                  sq_signal_bits;
        u32                     sq_next_wqe;
-       int                     sq_max_wqes_per_wr;
-       int                     sq_spare_wqes;
        struct hns_roce_wq      sq;
 
        struct ib_umem          *umem;
@@ -752,7 +748,7 @@ struct hns_roce_eq {
        struct hns_roce_dev             *hr_dev;
        void __iomem                    *doorbell;
 
-       int                             type_flag;/* Aeq:1 ceq:0 */
+       int                             type_flag; /* Aeq:1 ceq:0 */
        int                             eqn;
        u32                             entries;
        int                             log_entries;
@@ -798,22 +794,22 @@ struct hns_roce_caps {
        int             local_ca_ack_delay;
        int             num_uars;
        u32             phy_num_uars;
-       u32             max_sq_sg;      /* 2 */
-       u32             max_sq_inline;  /* 32 */
-       u32             max_rq_sg;      /* 2 */
+       u32             max_sq_sg;
+       u32             max_sq_inline;
+       u32             max_rq_sg;
        u32             max_extend_sg;
-       int             num_qps;        /* 256k */
+       int             num_qps;
        int             reserved_qps;
        int             num_qpc_timer;
        int             num_cqc_timer;
        u32             max_srq_sg;
        int             num_srqs;
-       u32             max_wqes;       /* 16k */
+       u32             max_wqes;
        u32             max_srqs;
        u32             max_srq_wrs;
        u32             max_srq_sges;
-       u32             max_sq_desc_sz; /* 64 */
-       u32             max_rq_desc_sz; /* 64 */
+       u32             max_sq_desc_sz;
+       u32             max_rq_desc_sz;
        u32             max_srq_desc_sz;
        int             max_qp_init_rdma;
        int             max_qp_dest_rdma;
@@ -824,7 +820,7 @@ struct hns_roce_caps {
        int             reserved_cqs;
        int             reserved_srqs;
        u32             max_srqwqes;
-       int             num_aeq_vectors;        /* 1 */
+       int             num_aeq_vectors;
        int             num_comp_vectors;
        int             num_other_vectors;
        int             num_mtpts;
@@ -905,7 +901,7 @@ struct hns_roce_caps {
        u32             sl_num;
        u32             tsq_buf_pg_sz;
        u32             tpq_buf_pg_sz;
-       u32             chunk_sz;       /* chunk size in non multihop mode*/
+       u32             chunk_sz;       /* chunk size in non multihop mode */
        u64             flags;
 };
 
@@ -991,16 +987,6 @@ struct hns_roce_hw {
        const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
 
-enum hns_phy_state {
-       HNS_ROCE_PHY_SLEEP              = 1,
-       HNS_ROCE_PHY_POLLING            = 2,
-       HNS_ROCE_PHY_DISABLED           = 3,
-       HNS_ROCE_PHY_TRAINING           = 4,
-       HNS_ROCE_PHY_LINKUP             = 5,
-       HNS_ROCE_PHY_LINKERR            = 6,
-       HNS_ROCE_PHY_TEST               = 7
-};
-
 struct hns_roce_dev {
        struct ib_device        ib_dev;
        struct platform_device  *pdev;
@@ -1045,8 +1031,8 @@ struct hns_roce_dev {
        int                     loop_idc;
        u32                     sdb_offset;
        u32                     odb_offset;
-       dma_addr_t              tptr_dma_addr; /*only for hw v1*/
-       u32                     tptr_size; /*only for hw v1*/
+       dma_addr_t              tptr_dma_addr;  /* only for hw v1 */
+       u32                     tptr_size;      /* only for hw v1 */
        const struct hns_roce_hw *hw;
        void                    *priv;
        struct workqueue_struct *irq_workq;
index f4da5bd2884fd14f7994abb50887a43d54dd3531..f2c4fef0b70e1921b6342eab57940c9ef2a57ce0 100644
 
 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
 {
-       if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
-           (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
-           (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
-           (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
-           (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
-           (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
-           (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
-           (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
-           (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
-           (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
-           (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
-               return true;
-
-       return false;
+       int hop_num = 0;
+
+       switch (type) {
+       case HEM_TYPE_QPC:
+               hop_num = hr_dev->caps.qpc_hop_num;
+               break;
+       case HEM_TYPE_MTPT:
+               hop_num = hr_dev->caps.mpt_hop_num;
+               break;
+       case HEM_TYPE_CQC:
+               hop_num = hr_dev->caps.cqc_hop_num;
+               break;
+       case HEM_TYPE_SRQC:
+               hop_num = hr_dev->caps.srqc_hop_num;
+               break;
+       case HEM_TYPE_SCCC:
+               hop_num = hr_dev->caps.sccc_hop_num;
+               break;
+       case HEM_TYPE_QPC_TIMER:
+               hop_num = hr_dev->caps.qpc_timer_hop_num;
+               break;
+       case HEM_TYPE_CQC_TIMER:
+               hop_num = hr_dev->caps.cqc_timer_hop_num;
+               break;
+       case HEM_TYPE_CQE:
+               hop_num = hr_dev->caps.cqe_hop_num;
+               break;
+       case HEM_TYPE_MTT:
+               hop_num = hr_dev->caps.mtt_hop_num;
+               break;
+       case HEM_TYPE_SRQWQE:
+               hop_num = hr_dev->caps.srqwqe_hop_num;
+               break;
+       case HEM_TYPE_IDX:
+               hop_num = hr_dev->caps.idx_hop_num;
+               break;
+       default:
+               return false;
+       }
+
+       return hop_num ? true : false;
 }
 
 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
-                           u32 bt_chunk_num)
+                           u32 bt_chunk_num, u64 hem_max_num)
 {
-       int i;
+       u64 check_max_num = start_idx + bt_chunk_num;
+       u64 i;
 
-       for (i = 0; i < bt_chunk_num; i++)
-               if (hem[start_idx + i])
+       for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
+               if (hem[i])
                        return false;
 
        return true;
@@ -92,17 +120,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
                return 0;
 }
 
-int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
-                          struct hns_roce_hem_table *table, unsigned long *obj,
-                          struct hns_roce_hem_mhop *mhop)
+static int get_hem_table_config(struct hns_roce_dev *hr_dev,
+                               struct hns_roce_hem_mhop *mhop,
+                               u32 type)
 {
        struct device *dev = hr_dev->dev;
-       u32 chunk_ba_num;
-       u32 table_idx;
-       u32 bt_num;
-       u32 chunk_size;
 
-       switch (table->type) {
+       switch (type) {
        case HEM_TYPE_QPC:
                mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
                                             + PAGE_SHIFT);
@@ -193,10 +217,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
                break;
        default:
                dev_err(dev, "Table %d not support multi-hop addressing!\n",
-                        table->type);
+                       type);
                return -EINVAL;
        }
 
+       return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+                          struct hns_roce_hem_table *table, unsigned long *obj,
+                          struct hns_roce_hem_mhop *mhop)
+{
+       struct device *dev = hr_dev->dev;
+       u32 chunk_ba_num;
+       u32 table_idx;
+       u32 bt_num;
+       u32 chunk_size;
+
+       if (get_hem_table_config(hr_dev, mhop, table->type))
+               return -EINVAL;
+
        if (!obj)
                return 0;
 
@@ -324,7 +364,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 {
        spinlock_t *lock = &hr_dev->bt_cmd_lock;
        struct device *dev = hr_dev->dev;
-       unsigned long end = 0;
+       long end;
        unsigned long flags;
        struct hns_roce_hem_iter iter;
        void __iomem *bt_cmd;
@@ -375,7 +415,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
                bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
 
                end = HW_SYNC_TIMEOUT_MSECS;
-               while (end) {
+               while (end > 0) {
                        if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
                                break;
 
@@ -457,6 +497,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
                return -EINVAL;
        }
 
+       if (unlikely(hem_idx >= table->num_hem)) {
+               dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
+                            table->type, hem_idx, table->num_hem);
+               return -EINVAL;
+       }
+
        mutex_lock(&table->mutex);
 
        if (table->hem[hem_idx]) {
@@ -693,7 +739,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
        if (check_whether_bt_num_2(table->type, hop_num)) {
                start_idx = mhop.l0_idx * chunk_ba_num;
                if (hns_roce_check_hem_null(table->hem, start_idx,
-                                           chunk_ba_num)) {
+                                           chunk_ba_num, table->num_hem)) {
                        if (table->type < HEM_TYPE_MTT &&
                            hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
                                dev_warn(dev, "Clear HEM base address failed.\n");
@@ -707,7 +753,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
                start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
                            mhop.l1_idx * chunk_ba_num;
                if (hns_roce_check_hem_null(table->hem, start_idx,
-                                           chunk_ba_num)) {
+                                           chunk_ba_num, table->num_hem)) {
                        if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
                                dev_warn(dev, "Clear HEM base address failed.\n");
 
@@ -791,7 +837,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
        } else {
                u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
 
-               hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+               if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
+                       goto out;
                /* mtt mhop */
                i = mhop.l0_idx;
                j = mhop.l1_idx;
@@ -840,11 +887,13 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
 {
        struct hns_roce_hem_mhop mhop;
        unsigned long inc = table->table_chunk_size / table->obj_size;
-       unsigned long i;
+       unsigned long i = 0;
        int ret;
 
        if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
-               hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+               ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+               if (ret)
+                       goto fail;
                inc = mhop.bt_chunk_size / table->obj_size;
        }
 
@@ -874,7 +923,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
        unsigned long i;
 
        if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
-               hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+               if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+                       return;
                inc = mhop.bt_chunk_size / table->obj_size;
        }
 
@@ -887,7 +937,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
                            unsigned long obj_size, unsigned long nobj,
                            int use_lowmem)
 {
-       struct device *dev = hr_dev->dev;
        unsigned long obj_per_chunk;
        unsigned long num_hem;
 
@@ -900,99 +949,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
                if (!table->hem)
                        return -ENOMEM;
        } else {
+               struct hns_roce_hem_mhop mhop = {};
                unsigned long buf_chunk_size;
                unsigned long bt_chunk_size;
                unsigned long bt_chunk_num;
                unsigned long num_bt_l0 = 0;
                u32 hop_num;
 
-               switch (type) {
-               case HEM_TYPE_QPC:
-                       buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.qpc_bt_num;
-                       hop_num = hr_dev->caps.qpc_hop_num;
-                       break;
-               case HEM_TYPE_MTPT:
-                       buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.mpt_bt_num;
-                       hop_num = hr_dev->caps.mpt_hop_num;
-                       break;
-               case HEM_TYPE_CQC:
-                       buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.cqc_bt_num;
-                       hop_num = hr_dev->caps.cqc_hop_num;
-                       break;
-               case HEM_TYPE_SCCC:
-                       buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.sccc_bt_num;
-                       hop_num = hr_dev->caps.sccc_hop_num;
-                       break;
-               case HEM_TYPE_QPC_TIMER:
-                       buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
-                       hop_num = hr_dev->caps.qpc_timer_hop_num;
-                       break;
-               case HEM_TYPE_CQC_TIMER:
-                       buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
-                       hop_num = hr_dev->caps.cqc_timer_hop_num;
-                       break;
-               case HEM_TYPE_SRQC:
-                       buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       num_bt_l0 = hr_dev->caps.srqc_bt_num;
-                       hop_num = hr_dev->caps.srqc_hop_num;
-                       break;
-               case HEM_TYPE_MTT:
-                       buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = buf_chunk_size;
-                       hop_num = hr_dev->caps.mtt_hop_num;
-                       break;
-               case HEM_TYPE_CQE:
-                       buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = buf_chunk_size;
-                       hop_num = hr_dev->caps.cqe_hop_num;
-                       break;
-               case HEM_TYPE_SRQWQE:
-                       buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = buf_chunk_size;
-                       hop_num = hr_dev->caps.srqwqe_hop_num;
-                       break;
-               case HEM_TYPE_IDX:
-                       buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
-                                       + PAGE_SHIFT);
-                       bt_chunk_size = buf_chunk_size;
-                       hop_num = hr_dev->caps.idx_hop_num;
-                       break;
-               default:
-                       dev_err(dev,
-                         "Table %d not support to init hem table here!\n",
-                         type);
+               if (get_hem_table_config(hr_dev, &mhop, type))
                        return -EINVAL;
-               }
+
+               buf_chunk_size = mhop.buf_chunk_size;
+               bt_chunk_size = mhop.bt_chunk_size;
+               num_bt_l0 = mhop.ba_l0_num;
+               hop_num = mhop.hop_num;
+
                obj_per_chunk = buf_chunk_size / obj_size;
                num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
                bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
@@ -1075,7 +1046,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
        int i;
        u64 obj;
 
-       hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+       if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+               return;
        buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
                                        mhop.bt_chunk_size;
 
index f1ccb8f35fe59f44a202056a1c0ff730f67799c2..86783276fb1f62db4e19710aa8c846b83d69a51f 100644
@@ -102,9 +102,9 @@ struct hns_roce_hem_mhop {
        u32     buf_chunk_size;
        u32     bt_chunk_size;
        u32     ba_l0_num;
-       u32     l0_idx;/* level 0 base address table index */
-       u32     l1_idx;/* level 1 base address table index */
-       u32     l2_idx;/* level 2 base address table index */
+       u32     l0_idx; /* level 0 base address table index */
+       u32     l1_idx; /* level 1 base address table index */
+       u32     l2_idx; /* level 2 base address table index */
 };
 
 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
index c07e387a07a38b88a02d175e6ddd889e13ef00bf..4c3ac2b7596615681cda4ffd013f4e2467591578 100644
@@ -971,7 +971,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
        struct hns_roce_free_mr *free_mr;
        struct hns_roce_v1_priv *priv;
        struct completion comp;
-       unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+       long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
 
        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        free_mr = &priv->free_mr;
@@ -991,7 +991,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
 
        queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
 
-       while (end) {
+       while (end > 0) {
                if (try_wait_for_completion(&comp))
                        return 0;
                msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
@@ -1109,7 +1109,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
        struct hns_roce_free_mr *free_mr;
        struct hns_roce_v1_priv *priv;
        struct completion comp;
-       unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
+       long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
        unsigned long start = jiffies;
        int npages;
        int ret = 0;
@@ -1139,7 +1139,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
 
        queue_work(free_mr->free_mr_wq, &(mr_work->work));
 
-       while (end) {
+       while (end > 0) {
                if (try_wait_for_completion(&comp))
                        goto free_mr;
                msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
@@ -2430,7 +2430,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
 {
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_v1_priv *priv;
-       unsigned long end = 0, flags = 0;
+       unsigned long flags = 0;
+       long end = HW_SYNC_TIMEOUT_MSECS;
        __le32 bt_cmd_val[2] = {0};
        void __iomem *bt_cmd;
        u64 bt_ba = 0;
@@ -2468,7 +2469,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
 
        bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
 
-       end = HW_SYNC_TIMEOUT_MSECS;
        while (1) {
                if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
                        if (!end) {
@@ -4639,10 +4639,8 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
        /* fetch the interrupt numbers */
        for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
                hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
-               if (hr_dev->irq[i] <= 0) {
-                       dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+               if (hr_dev->irq[i] <= 0)
                        return -EINVAL;
-               }
        }
 
        return 0;
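
Several hunks in this file (and the hns_roce_set_hem() change earlier) turn the HW_SYNC/free-MR timeout counters from unsigned long into long and test end > 0 rather than end. The underlying hazard is C's unsigned wrap-around: once such a counter is decremented past zero it becomes a huge positive value instead of going negative, so a plain while (end) test may never see the timeout expire. A small standalone illustration:

/* Why a decrementing unsigned timeout never looks "expired" (illustration only). */
#include <stdio.h>

#define TIMEOUT_MS	25
#define POLL_STEP_MS	10

int main(void)
{
	unsigned long u = TIMEOUT_MS;
	long s = TIMEOUT_MS;
	int i;

	for (i = 0; i < 3; i++) {
		u -= POLL_STEP_MS;	/* 25 -> 15 -> 5 -> wraps to ULONG_MAX - 4 */
		s -= POLL_STEP_MS;	/* 25 -> 15 -> 5 -> -5 */
	}
	printf("while (u)     is %s\n", u ? "still true" : "false");
	printf("while (s > 0) is %s\n", s > 0 ? "still true" : "false");
	return 0;
}
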
index b76e3beeafb8f5e296b01145312ce111b1b632f4..206dfdb16cd549b44f746ee9e8402c8a1c87d6ae 100644
@@ -239,7 +239,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
        struct device *dev = hr_dev->dev;
        struct hns_roce_v2_db sq_db;
        struct ib_qp_attr attr;
-       unsigned int sge_ind = 0;
+       unsigned int sge_ind;
        unsigned int owner_bit;
        unsigned long flags;
        unsigned int ind;
@@ -887,8 +887,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
-                         (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
-                          HNS_ROCE_CMQ_ENABLE);
+                          ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
@@ -896,8 +895,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
-                         (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
-                          HNS_ROCE_CMQ_ENABLE);
+                          ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
@@ -1310,7 +1308,7 @@ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
-       roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
+       roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
 
        return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -1888,7 +1886,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
                goto err_tpq_init_failed;
        }
 
-       /* Alloc memory for QPC Timer buffer space chunk*/
+       /* Alloc memory for QPC Timer buffer space chunk */
        for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
             qpc_count++) {
                ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
@@ -1899,7 +1897,7 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
                }
        }
 
-       /* Alloc memory for CQC Timer buffer space chunk*/
+       /* Alloc memory for CQC Timer buffer space chunk */
        for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
             cqc_count++) {
                ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
@@ -2409,7 +2407,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 
        for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
             ++prod_index) {
-               if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+               if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
                        break;
        }
 
@@ -2862,15 +2860,16 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                wc->smac[5] = roce_get_field(cqe->byte_28,
                                             V2_CQE_BYTE_28_SMAC_5_M,
                                             V2_CQE_BYTE_28_SMAC_5_S);
+               wc->wc_flags |= IB_WC_WITH_SMAC;
                if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
                        wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
                                                          V2_CQE_BYTE_28_VID_M,
                                                          V2_CQE_BYTE_28_VID_S);
+                       wc->wc_flags |= IB_WC_WITH_VLAN;
                } else {
                        wc->vlan_id = 0xffff;
                }
 
-               wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                wc->network_hdr_type = roce_get_field(cqe->byte_28,
                                                    V2_CQE_BYTE_28_PORT_TYPE_M,
                                                    V2_CQE_BYTE_28_PORT_TYPE_S);
@@ -2905,11 +2904,49 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
        return npolled;
 }
 
+static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
+                             int step_idx)
+{
+       int op;
+
+       if (type == HEM_TYPE_SCCC && step_idx)
+               return -EINVAL;
+
+       switch (type) {
+       case HEM_TYPE_QPC:
+               op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+               break;
+       case HEM_TYPE_MTPT:
+               op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+               break;
+       case HEM_TYPE_CQC:
+               op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+               break;
+       case HEM_TYPE_SRQC:
+               op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+               break;
+       case HEM_TYPE_SCCC:
+               op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+               break;
+       case HEM_TYPE_QPC_TIMER:
+               op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
+               break;
+       case HEM_TYPE_CQC_TIMER:
+               op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
+               break;
+       default:
+               dev_warn(hr_dev->dev,
+                        "Table %d not to be written by mailbox!\n", type);
+               return -EINVAL;
+       }
+
+       return op + step_idx;
+}
+
 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
                               struct hns_roce_hem_table *table, int obj,
                               int step_idx)
 {
-       struct device *dev = hr_dev->dev;
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_hem_iter iter;
        struct hns_roce_hem_mhop mhop;
@@ -2922,7 +2959,7 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
        u64 bt_ba = 0;
        u32 chunk_ba_num;
        u32 hop_num;
-       u16 op = 0xff;
+       int op;
 
        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
                return 0;
@@ -2944,38 +2981,9 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
                hem_idx = i;
        }
 
-       switch (table->type) {
-       case HEM_TYPE_QPC:
-               op = HNS_ROCE_CMD_WRITE_QPC_BT0;
-               break;
-       case HEM_TYPE_MTPT:
-               op = HNS_ROCE_CMD_WRITE_MPT_BT0;
-               break;
-       case HEM_TYPE_CQC:
-               op = HNS_ROCE_CMD_WRITE_CQC_BT0;
-               break;
-       case HEM_TYPE_SRQC:
-               op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
-               break;
-       case HEM_TYPE_SCCC:
-               op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
-               break;
-       case HEM_TYPE_QPC_TIMER:
-               op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
-               break;
-       case HEM_TYPE_CQC_TIMER:
-               op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
-               break;
-       default:
-               dev_warn(dev, "Table %d not to be written by mailbox!\n",
-                        table->type);
+       op = get_op_for_set_hem(hr_dev, table->type, step_idx);
+       if (op == -EINVAL)
                return 0;
-       }
-
-       if (table->type == HEM_TYPE_SCCC && step_idx)
-               return 0;
-
-       op += step_idx;
 
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
@@ -3118,6 +3126,43 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
 }
 
+static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
+                           struct hns_roce_v2_qp_context *context,
+                           struct hns_roce_v2_qp_context *qpc_mask)
+{
+       if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S,
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt));
+       else
+               roce_set_field(context->byte_4_sqpn_tst,
+                              V2_QPC_BYTE_4_SGE_SHIFT_M,
+                              V2_QPC_BYTE_4_SGE_SHIFT_S,
+                              hr_qp->sq.max_gs >
+                              HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
+                              ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
+       roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+                      V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+       roce_set_field(context->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+                      ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+       roce_set_field(context->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+                      (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+                      hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
+                      hr_qp->ibqp.srq) ? 0 :
+                      ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+
+       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+}
+
 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
                                    const struct ib_qp_attr *attr,
                                    int attr_mask,
@@ -3138,21 +3183,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
                       V2_QPC_BYTE_4_TST_S, 0);
 
-       if (ibqp->qp_type == IB_QPT_GSI)
-               roce_set_field(context->byte_4_sqpn_tst,
-                              V2_QPC_BYTE_4_SGE_SHIFT_M,
-                              V2_QPC_BYTE_4_SGE_SHIFT_S,
-                              ilog2((unsigned int)hr_qp->sge.sge_cnt));
-       else
-               roce_set_field(context->byte_4_sqpn_tst,
-                              V2_QPC_BYTE_4_SGE_SHIFT_M,
-                              V2_QPC_BYTE_4_SGE_SHIFT_S,
-                              hr_qp->sq.max_gs > 2 ?
-                              ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
-
-       roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
-                      V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
                       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
@@ -3168,19 +3198,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
                       V2_QPC_BYTE_20_RQWS_S, 0);
 
-       roce_set_field(context->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
-                      ilog2((unsigned int)hr_qp->sq.wqe_cnt));
-       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
-
-       roce_set_field(context->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
-                      (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
-                      hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
-                      ilog2((unsigned int)hr_qp->rq.wqe_cnt));
-       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+       set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
 
        /* No VLAN need to set 0xFFF */
        roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
@@ -3456,22 +3474,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
                       V2_QPC_BYTE_4_TST_S, 0);
 
-       if (ibqp->qp_type == IB_QPT_GSI)
-               roce_set_field(context->byte_4_sqpn_tst,
-                              V2_QPC_BYTE_4_SGE_SHIFT_M,
-                              V2_QPC_BYTE_4_SGE_SHIFT_S,
-                              ilog2((unsigned int)hr_qp->sge.sge_cnt));
-       else
-               roce_set_field(context->byte_4_sqpn_tst,
-                              V2_QPC_BYTE_4_SGE_SHIFT_M,
-                              V2_QPC_BYTE_4_SGE_SHIFT_S,
-                              hr_qp->sq.max_gs >
-                              HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
-                              ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
-
-       roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
-                      V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
-
        if (attr_mask & IB_QP_ACCESS_FLAGS) {
                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
@@ -3506,20 +3508,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
                             0);
        }
 
-       roce_set_field(context->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
-                      ilog2((unsigned int)hr_qp->sq.wqe_cnt));
-       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
-
-       roce_set_field(context->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
-                      (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
-                      hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT || ibqp->srq) ? 0 :
-                      ilog2((unsigned int)hr_qp->rq.wqe_cnt));
-       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-                      V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
-
        roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
                       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
@@ -3974,30 +3962,119 @@ static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
 
 }
 
-static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
-                                const struct ib_qp_attr *attr,
-                                int attr_mask, enum ib_qp_state cur_state,
-                                enum ib_qp_state new_state)
+static int hns_roce_v2_set_path(struct ib_qp *ibqp,
+                               const struct ib_qp_attr *attr,
+                               int attr_mask,
+                               struct hns_roce_v2_qp_context *context,
+                               struct hns_roce_v2_qp_context *qpc_mask)
 {
+       const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-       struct hns_roce_v2_qp_context *context;
-       struct hns_roce_v2_qp_context *qpc_mask;
-       struct device *dev = hr_dev->dev;
-       int ret = -EINVAL;
+       const struct ib_gid_attr *gid_attr = NULL;
+       int is_roce_protocol;
+       bool is_udp = false;
+       u16 vlan = 0xffff;
+       u8 ib_port;
+       u8 hr_port;
+       int ret;
 
-       context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
-       if (!context)
-               return -ENOMEM;
+       ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
+       hr_port = ib_port - 1;
+       is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
+                          rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
+
+       if (is_roce_protocol) {
+               gid_attr = attr->ah_attr.grh.sgid_attr;
+               ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+               if (ret)
+                       return ret;
+
+               if (gid_attr)
+                       is_udp = (gid_attr->gid_type ==
+                                IB_GID_TYPE_ROCE_UDP_ENCAP);
+       }
+
+       if (vlan < VLAN_CFI_MASK) {
+               roce_set_bit(context->byte_76_srqn_op_en,
+                            V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
+               roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+                            V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
+               roce_set_bit(context->byte_168_irrl_idx,
+                            V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
+               roce_set_bit(qpc_mask->byte_168_irrl_idx,
+                            V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+       }
+
+       roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+                      V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+       roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
+                      V2_QPC_BYTE_24_VLAN_ID_S, 0);
+
+       if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
+               dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
+                       grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
+               return -EINVAL;
+       }
+
+       if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
+               dev_err(hr_dev->dev, "ah attr is not RDMA RoCE type\n");
+               return -EINVAL;
+       }
+
+       roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
+                      V2_QPC_BYTE_52_UDPSPN_S,
+                      is_udp ? 0x12b7 : 0);
+
+       roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
+                      V2_QPC_BYTE_52_UDPSPN_S, 0);
+
+       roce_set_field(context->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
+                      grh->sgid_index);
+
+       roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+                      V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
+       roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+                      V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+       roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+                      V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+       if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+               roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+                              V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
+       else
+               roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+                              V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+       roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+                      V2_QPC_BYTE_24_TC_S, 0);
+       roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+                      V2_QPC_BYTE_28_FL_S, grh->flow_label);
+       roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+                      V2_QPC_BYTE_28_FL_S, 0);
+       memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+       memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+       roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+                      V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+       roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+                      V2_QPC_BYTE_28_SL_S, 0);
+       hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+       return 0;
+}
+
+static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+                                     const struct ib_qp_attr *attr,
+                                     int attr_mask,
+                                     enum ib_qp_state cur_state,
+                                     enum ib_qp_state new_state,
+                                     struct hns_roce_v2_qp_context *context,
+                                     struct hns_roce_v2_qp_context *qpc_mask)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       int ret = 0;
 
-       qpc_mask = context + 1;
-       /*
-        * In v2 engine, software pass context and context mask to hardware
-        * when modifying qp. If software need modify some fields in context,
-        * we should set all bits of the relevant fields in context mask to
-        * 0 at the same time, else set them to 0x1.
-        */
-       memset(qpc_mask, 0xff, sizeof(*qpc_mask));
        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
                memset(qpc_mask, 0, sizeof(*qpc_mask));
                modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
@@ -4019,134 +4096,30 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                /* Nothing */
                ;
        } else {
-               dev_err(dev, "Illegal state for QP!\n");
+               dev_err(hr_dev->dev, "Illegal state for QP!\n");
                ret = -EINVAL;
                goto out;
        }
 
-       /* When QP state is err, SQ and RQ WQE should be flushed */
-       if (new_state == IB_QPS_ERR) {
-               roce_set_field(context->byte_160_sq_ci_pi,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
-                              hr_qp->sq.head);
-               roce_set_field(qpc_mask->byte_160_sq_ci_pi,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+out:
+       return ret;
+}
 
-               if (!ibqp->srq) {
-                       roce_set_field(context->byte_84_rq_ci_pi,
-                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
-                              hr_qp->rq.head);
-                       roce_set_field(qpc_mask->byte_84_rq_ci_pi,
-                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-               }
-       }
+static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
+                                     const struct ib_qp_attr *attr,
+                                     int attr_mask,
+                                     struct hns_roce_v2_qp_context *context,
+                                     struct hns_roce_v2_qp_context *qpc_mask)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+       int ret = 0;
 
        if (attr_mask & IB_QP_AV) {
-               const struct ib_global_route *grh =
-                                           rdma_ah_read_grh(&attr->ah_attr);
-               const struct ib_gid_attr *gid_attr = NULL;
-               int is_roce_protocol;
-               u16 vlan = 0xffff;
-               u8 ib_port;
-               u8 hr_port;
-
-               ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
-                          hr_qp->port + 1;
-               hr_port = ib_port - 1;
-               is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
-                              rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
-
-               if (is_roce_protocol) {
-                       gid_attr = attr->ah_attr.grh.sgid_attr;
-                       ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
-                       if (ret)
-                               goto out;
-               }
-
-               if (vlan < VLAN_CFI_MASK) {
-                       roce_set_bit(context->byte_76_srqn_op_en,
-                                    V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
-                       roce_set_bit(qpc_mask->byte_76_srqn_op_en,
-                                    V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
-                       roce_set_bit(context->byte_168_irrl_idx,
-                                    V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
-                       roce_set_bit(qpc_mask->byte_168_irrl_idx,
-                                    V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
-               }
-
-               roce_set_field(context->byte_24_mtu_tc,
-                              V2_QPC_BYTE_24_VLAN_ID_M,
-                              V2_QPC_BYTE_24_VLAN_ID_S, vlan);
-               roce_set_field(qpc_mask->byte_24_mtu_tc,
-                              V2_QPC_BYTE_24_VLAN_ID_M,
-                              V2_QPC_BYTE_24_VLAN_ID_S, 0);
-
-               if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
-                       dev_err(hr_dev->dev,
-                               "sgid_index(%u) too large. max is %d\n",
-                               grh->sgid_index,
-                               hr_dev->caps.gid_table_len[hr_port]);
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
-                       dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               roce_set_field(context->byte_52_udpspn_dmac,
-                          V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
-                          (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
-                          0 : 0x12b7);
-
-               roce_set_field(qpc_mask->byte_52_udpspn_dmac,
-                              V2_QPC_BYTE_52_UDPSPN_M,
-                              V2_QPC_BYTE_52_UDPSPN_S, 0);
-
-               roce_set_field(context->byte_20_smac_sgid_idx,
-                              V2_QPC_BYTE_20_SGID_IDX_M,
-                              V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
-
-               roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
-                              V2_QPC_BYTE_20_SGID_IDX_M,
-                              V2_QPC_BYTE_20_SGID_IDX_S, 0);
-
-               roce_set_field(context->byte_24_mtu_tc,
-                              V2_QPC_BYTE_24_HOP_LIMIT_M,
-                              V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
-               roce_set_field(qpc_mask->byte_24_mtu_tc,
-                              V2_QPC_BYTE_24_HOP_LIMIT_M,
-                              V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
-
-               if (hr_dev->pci_dev->revision == 0x21 &&
-                   gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
-                       roce_set_field(context->byte_24_mtu_tc,
-                                      V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
-                                      grh->traffic_class >> 2);
-               else
-                       roce_set_field(context->byte_24_mtu_tc,
-                                      V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
-                                      grh->traffic_class);
-               roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
-                              V2_QPC_BYTE_24_TC_S, 0);
-               roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
-                              V2_QPC_BYTE_28_FL_S, grh->flow_label);
-               roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
-                              V2_QPC_BYTE_28_FL_S, 0);
-               memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
-               memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
-               roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
-                              V2_QPC_BYTE_28_SL_S,
-                              rdma_ah_get_sl(&attr->ah_attr));
-               roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
-                              V2_QPC_BYTE_28_SL_S, 0);
-               hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+               ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
+                                          qpc_mask);
+               if (ret)
+                       return ret;
        }
 
        if (attr_mask & IB_QP_TIMEOUT) {
@@ -4158,7 +4131,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
                                       0);
                } else {
-                       dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+                       dev_warn(hr_dev->dev,
+                                "Local ACK timeout shall be 0 to 30.\n");
                }
        }
 
@@ -4196,6 +4170,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                               V2_QPC_BYTE_244_RNR_CNT_S, 0);
        }
 
+       /* RC&UC&UD required attr */
        if (attr_mask & IB_QP_SQ_PSN) {
                roce_set_field(context->byte_172_sq_psn,
                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -4295,6 +4270,80 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                hr_qp->qkey = attr->qkey;
        }
 
+       return ret;
+}
+
+static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
+                                         const struct ib_qp_attr *attr,
+                                         int attr_mask)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               hr_qp->atomic_rd_en = attr->qp_access_flags;
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               hr_qp->resp_depth = attr->max_dest_rd_atomic;
+       if (attr_mask & IB_QP_PORT) {
+               hr_qp->port = attr->port_num - 1;
+               hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+       }
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+                                const struct ib_qp_attr *attr,
+                                int attr_mask, enum ib_qp_state cur_state,
+                                enum ib_qp_state new_state)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+       struct hns_roce_v2_qp_context ctx[2];
+       struct hns_roce_v2_qp_context *context = ctx;
+       struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+       struct device *dev = hr_dev->dev;
+       int ret;
+
+       /*
+        * In the v2 engine, software passes both a context and a context mask
+        * to hardware when modifying a QP. For each field software wants to
+        * modify, all bits of that field in the context mask must be cleared
+        * to 0 at the same time; otherwise they are left set to 0x1.
+        */
+       memset(context, 0, sizeof(*context));
+       memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+       ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
+                                        new_state, context, qpc_mask);
+       if (ret)
+               goto out;
+
+       /* When QP state is err, SQ and RQ WQE should be flushed */
+       if (new_state == IB_QPS_ERR) {
+               roce_set_field(context->byte_160_sq_ci_pi,
+                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+                              hr_qp->sq.head);
+               roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+
+               if (!ibqp->srq) {
+                       roce_set_field(context->byte_84_rq_ci_pi,
+                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+                              hr_qp->rq.head);
+                       roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+                              V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+               }
+       }
+
+       /* Configure the optional fields */
+       ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
+                                        qpc_mask);
+       if (ret)
+               goto out;
+
        roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
                     ibqp->srq ? 1 : 0);
        roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
@@ -4307,8 +4356,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                       V2_QPC_BYTE_60_QP_ST_S, 0);
 
        /* SW pass context to HW */
-       ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state,
-                                   context, hr_qp);
+       ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
                goto out;
@@ -4316,15 +4364,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 
        hr_qp->state = new_state;
 
-       if (attr_mask & IB_QP_ACCESS_FLAGS)
-               hr_qp->atomic_rd_en = attr->qp_access_flags;
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-               hr_qp->resp_depth = attr->max_dest_rd_atomic;
-       if (attr_mask & IB_QP_PORT) {
-               hr_qp->port = attr->port_num - 1;
-               hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
-       }
+       hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
 
        if (new_state == IB_QPS_RESET && !ibqp->uobject) {
                hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -4344,7 +4384,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
        }
 
 out:
-       kfree(context);
        return ret;
 }
 
@@ -4395,16 +4434,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-       struct hns_roce_v2_qp_context *context;
+       struct hns_roce_v2_qp_context context = {};
        struct device *dev = hr_dev->dev;
        int tmp_qp_state;
        int state;
        int ret;
 
-       context = kzalloc(sizeof(*context), GFP_KERNEL);
-       if (!context)
-               return -ENOMEM;
-
        memset(qp_attr, 0, sizeof(*qp_attr));
        memset(qp_init_attr, 0, sizeof(*qp_init_attr));
 
@@ -4416,14 +4451,14 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                goto done;
        }
 
-       ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
+       ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
        if (ret) {
                dev_err(dev, "query qpc error\n");
                ret = -EINVAL;
                goto out;
        }
 
-       state = roce_get_field(context->byte_60_qpst_tempid,
+       state = roce_get_field(context.byte_60_qpst_tempid,
                               V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
        tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
        if (tmp_qp_state == -1) {
@@ -4433,7 +4468,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        }
        hr_qp->state = (u8)tmp_qp_state;
        qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
-       qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
+       qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
                                                        V2_QPC_BYTE_24_MTU_M,
                                                        V2_QPC_BYTE_24_MTU_S);
        qp_attr->path_mig_state = IB_MIG_ARMED;
@@ -4441,20 +4476,20 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        if (hr_qp->ibqp.qp_type == IB_QPT_UD)
                qp_attr->qkey = V2_QKEY_VAL;
 
-       qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
+       qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
                                         V2_QPC_BYTE_108_RX_REQ_EPSN_M,
                                         V2_QPC_BYTE_108_RX_REQ_EPSN_S);
-       qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
+       qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
                                              V2_QPC_BYTE_172_SQ_CUR_PSN_M,
                                              V2_QPC_BYTE_172_SQ_CUR_PSN_S);
-       qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
+       qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
                                                  V2_QPC_BYTE_56_DQPN_M,
                                                  V2_QPC_BYTE_56_DQPN_S);
-       qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
+       qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
                                    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) |
-                                   ((roce_get_bit(context->byte_76_srqn_op_en,
+                                   ((roce_get_bit(context.byte_76_srqn_op_en,
                                    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) |
-                                   ((roce_get_bit(context->byte_76_srqn_op_en,
+                                   ((roce_get_bit(context.byte_76_srqn_op_en,
                                    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
 
        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
@@ -4463,43 +4498,43 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                                rdma_ah_retrieve_grh(&qp_attr->ah_attr);
 
                rdma_ah_set_sl(&qp_attr->ah_attr,
-                              roce_get_field(context->byte_28_at_fl,
+                              roce_get_field(context.byte_28_at_fl,
                                              V2_QPC_BYTE_28_SL_M,
                                              V2_QPC_BYTE_28_SL_S));
-               grh->flow_label = roce_get_field(context->byte_28_at_fl,
+               grh->flow_label = roce_get_field(context.byte_28_at_fl,
                                                 V2_QPC_BYTE_28_FL_M,
                                                 V2_QPC_BYTE_28_FL_S);
-               grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
+               grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
                                                 V2_QPC_BYTE_20_SGID_IDX_M,
                                                 V2_QPC_BYTE_20_SGID_IDX_S);
-               grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
+               grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
                                                V2_QPC_BYTE_24_HOP_LIMIT_M,
                                                V2_QPC_BYTE_24_HOP_LIMIT_S);
-               grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
+               grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
                                                    V2_QPC_BYTE_24_TC_M,
                                                    V2_QPC_BYTE_24_TC_S);
 
-               memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
+               memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
        }
 
        qp_attr->port_num = hr_qp->port + 1;
        qp_attr->sq_draining = 0;
-       qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
+       qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
                                                     V2_QPC_BYTE_208_SR_MAX_M,
                                                     V2_QPC_BYTE_208_SR_MAX_S);
-       qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
+       qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
                                                     V2_QPC_BYTE_140_RR_MAX_M,
                                                     V2_QPC_BYTE_140_RR_MAX_S);
-       qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
+       qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
-       qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
+       qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
                                              V2_QPC_BYTE_28_AT_M,
                                              V2_QPC_BYTE_28_AT_S);
-       qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
+       qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
                                            V2_QPC_BYTE_212_RETRY_CNT_M,
                                            V2_QPC_BYTE_212_RETRY_CNT_S);
-       qp_attr->rnr_retry = context->rq_rnr_timer;
+       qp_attr->rnr_retry = context.rq_rnr_timer;
 
 done:
        qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -4518,7 +4553,6 @@ done:
 
 out:
        mutex_unlock(&hr_qp->mutex);
-       kfree(context);
        return ret;
 }
 
@@ -4527,7 +4561,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
                                         struct ib_udata *udata)
 {
        struct hns_roce_cq *send_cq, *recv_cq;
-       struct device *dev = hr_dev->dev;
+       struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;
 
        if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
@@ -4535,8 +4569,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
                ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
                                            hr_qp->state, IB_QPS_RESET);
                if (ret) {
-                       dev_err(dev, "modify QP %06lx to ERR failed.\n",
-                               hr_qp->qpn);
+                       ibdev_err(ibdev, "modify QP to Reset failed.\n");
                        return ret;
                }
        }
@@ -4605,7 +4638,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
        ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
        if (ret) {
-               dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
+               ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
+                         hr_qp->qpn, ret);
                return ret;
        }
 
@@ -4904,7 +4938,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
                               struct hns_roce_eq *eq)
 {
        struct device *dev = hr_dev->dev;
-       struct hns_roce_aeqe *aeqe;
+       struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
        int aeqe_found = 0;
        int event_type;
        int sub_type;
@@ -4912,8 +4946,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
        u32 qpn;
        u32 cqn;
 
-       while ((aeqe = next_aeqe_sw_v2(eq))) {
-
+       while (aeqe) {
                /* Make sure we read AEQ entry after we have checked the
                 * ownership bit
                 */
@@ -4977,11 +5010,12 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
                ++eq->cons_index;
                aeqe_found = 1;
 
-               if (eq->cons_index > (2 * eq->entries - 1)) {
-                       dev_warn(dev, "cons_index overflow, set back to 0.\n");
+               if (eq->cons_index > (2 * eq->entries - 1))
                        eq->cons_index = 0;
-               }
+
                hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
+
+               aeqe = next_aeqe_sw_v2(eq);
        }
 
        set_eq_cons_index_v2(eq);
@@ -5034,12 +5068,11 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
                               struct hns_roce_eq *eq)
 {
        struct device *dev = hr_dev->dev;
-       struct hns_roce_ceqe *ceqe;
+       struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
        int ceqe_found = 0;
        u32 cqn;
 
-       while ((ceqe = next_ceqe_sw_v2(eq))) {
-
+       while (ceqe) {
                /* Make sure we read CEQ entry after we have checked the
                 * ownership bit
                 */
@@ -5058,6 +5091,8 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
                        dev_warn(dev, "cons_index overflow, set back to 0.\n");
                        eq->cons_index = 0;
                }
+
+               ceqe = next_ceqe_sw_v2(eq);
        }
 
        set_eq_cons_index_v2(eq);
@@ -5202,14 +5237,12 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
        buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
        bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
 
-       /* hop_num = 0 */
        if (mhop_num == HNS_ROCE_HOP_NUM_0) {
                dma_free_coherent(dev, (unsigned int)(eq->entries *
                                  eq->eqe_size), eq->bt_l0, eq->l0_dma);
                return;
        }
 
-       /* hop_num = 1 or hop = 2 */
        dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
        if (mhop_num == 1) {
                for (i = 0; i < eq->l0_last_num; i++) {
@@ -5449,7 +5482,6 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
                              buf_chk_sz);
        bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
 
-       /* hop_num = 0 */
        if (mhop_num == HNS_ROCE_HOP_NUM_0) {
                if (eq->entries > buf_chk_sz / eq->eqe_size) {
                        dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
@@ -5515,7 +5547,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
                                break;
                }
                eq->cur_eqe_ba = eq->buf_dma[0];
-               eq->nxt_eqe_ba = eq->buf_dma[1];
+               if (ba_num > 1)
+                       eq->nxt_eqe_ba = eq->buf_dma[1];
 
        } else if (mhop_num == 2) {
                /* alloc L1 BT and buf */
@@ -5556,7 +5589,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
                                break;
                }
                eq->cur_eqe_ba = eq->buf_dma[0];
-               eq->nxt_eqe_ba = eq->buf_dma[1];
+               if (ba_num > 1)
+                       eq->nxt_eqe_ba = eq->buf_dma[1];
        }
 
        eq->l0_last_num = i + 1;
@@ -5699,6 +5733,95 @@ free_cmd_mbox:
        return ret;
 }
 
+static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
+                                 int comp_num, int aeq_num, int other_num)
+{
+       struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+       int i, j;
+       int ret;
+
+       for (i = 0; i < irq_num; i++) {
+               hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+                                              GFP_KERNEL);
+               if (!hr_dev->irq_names[i]) {
+                       ret = -ENOMEM;
+                       goto err_kzalloc_failed;
+               }
+       }
+
+       /* irq contains: abnormal + AEQ + CEQ */
+       for (j = 0; j < other_num; j++)
+               snprintf((char *)hr_dev->irq_names[j],
+                        HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
+
+       for (j = other_num; j < (other_num + aeq_num); j++)
+               snprintf((char *)hr_dev->irq_names[j],
+                        HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+                        j - other_num);
+
+       for (j = (other_num + aeq_num); j < irq_num; j++)
+               snprintf((char *)hr_dev->irq_names[j],
+                        HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+                        j - other_num - aeq_num);
+
+       for (j = 0; j < irq_num; j++) {
+               if (j < other_num)
+                       ret = request_irq(hr_dev->irq[j],
+                                         hns_roce_v2_msix_interrupt_abn,
+                                         0, hr_dev->irq_names[j], hr_dev);
+
+               else if (j < (other_num + comp_num))
+                       ret = request_irq(eq_table->eq[j - other_num].irq,
+                                         hns_roce_v2_msix_interrupt_eq,
+                                         0, hr_dev->irq_names[j + aeq_num],
+                                         &eq_table->eq[j - other_num]);
+               else
+                       ret = request_irq(eq_table->eq[j - other_num].irq,
+                                         hns_roce_v2_msix_interrupt_eq,
+                                         0, hr_dev->irq_names[j - comp_num],
+                                         &eq_table->eq[j - other_num]);
+               if (ret) {
+                       dev_err(hr_dev->dev, "Request irq error!\n");
+                       goto err_request_failed;
+               }
+       }
+
+       return 0;
+
+err_request_failed:
+       for (j -= 1; j >= 0; j--)
+               if (j < other_num)
+                       free_irq(hr_dev->irq[j], hr_dev);
+               else
+                       free_irq(eq_table->eq[j - other_num].irq,
+                                &eq_table->eq[j - other_num]);
+
+err_kzalloc_failed:
+       for (i -= 1; i >= 0; i--)
+               kfree(hr_dev->irq_names[i]);
+
+       return ret;
+}
+
+static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
+{
+       int irq_num;
+       int eq_num;
+       int i;
+
+       eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+       irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+       for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+               free_irq(hr_dev->irq[i], hr_dev);
+
+       for (i = 0; i < eq_num; i++)
+               free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
+
+       for (i = 0; i < irq_num; i++)
+               kfree(hr_dev->irq_names[i]);
+}
+
 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
 {
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
@@ -5710,7 +5833,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
        int other_num;
        int comp_num;
        int aeq_num;
-       int i, j, k;
+       int i;
        int ret;
 
        other_num = hr_dev->caps.num_other_vectors;
@@ -5724,27 +5847,18 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
        if (!eq_table->eq)
                return -ENOMEM;
 
-       for (i = 0; i < irq_num; i++) {
-               hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
-                                              GFP_KERNEL);
-               if (!hr_dev->irq_names[i]) {
-                       ret = -ENOMEM;
-                       goto err_failed_kzalloc;
-               }
-       }
-
        /* create eq */
-       for (j = 0; j < eq_num; j++) {
-               eq = &eq_table->eq[j];
+       for (i = 0; i < eq_num; i++) {
+               eq = &eq_table->eq[i];
                eq->hr_dev = hr_dev;
-               eq->eqn = j;
-               if (j < comp_num) {
+               eq->eqn = i;
+               if (i < comp_num) {
                        /* CEQ */
                        eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
                        eq->type_flag = HNS_ROCE_CEQ;
                        eq->entries = hr_dev->caps.ceqe_depth;
                        eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
-                       eq->irq = hr_dev->irq[j + other_num + aeq_num];
+                       eq->irq = hr_dev->irq[i + other_num + aeq_num];
                        eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
                        eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
                } else {
@@ -5753,7 +5867,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
                        eq->type_flag = HNS_ROCE_AEQ;
                        eq->entries = hr_dev->caps.aeqe_depth;
                        eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
-                       eq->irq = hr_dev->irq[j - comp_num + other_num];
+                       eq->irq = hr_dev->irq[i - comp_num + other_num];
                        eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
                        eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
                }
@@ -5768,40 +5882,11 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
        /* enable irq */
        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
 
-       /* irq contains: abnormal + AEQ + CEQ*/
-       for (k = 0; k < irq_num; k++)
-               if (k < other_num)
-                       snprintf((char *)hr_dev->irq_names[k],
-                                HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
-               else if (k < (other_num + aeq_num))
-                       snprintf((char *)hr_dev->irq_names[k],
-                                HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
-                                k - other_num);
-               else
-                       snprintf((char *)hr_dev->irq_names[k],
-                                HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
-                                k - other_num - aeq_num);
-
-       for (k = 0; k < irq_num; k++) {
-               if (k < other_num)
-                       ret = request_irq(hr_dev->irq[k],
-                                         hns_roce_v2_msix_interrupt_abn,
-                                         0, hr_dev->irq_names[k], hr_dev);
-
-               else if (k < (other_num + comp_num))
-                       ret = request_irq(eq_table->eq[k - other_num].irq,
-                                         hns_roce_v2_msix_interrupt_eq,
-                                         0, hr_dev->irq_names[k + aeq_num],
-                                         &eq_table->eq[k - other_num]);
-               else
-                       ret = request_irq(eq_table->eq[k - other_num].irq,
-                                         hns_roce_v2_msix_interrupt_eq,
-                                         0, hr_dev->irq_names[k - comp_num],
-                                         &eq_table->eq[k - other_num]);
-               if (ret) {
-                       dev_err(dev, "Request irq error!\n");
-                       goto err_request_irq_fail;
-               }
+       ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
+                                    aeq_num, other_num);
+       if (ret) {
+               dev_err(dev, "Request irq failed.\n");
+               goto err_request_irq_fail;
        }
 
        hr_dev->irq_workq =
@@ -5809,26 +5894,20 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
        if (!hr_dev->irq_workq) {
                dev_err(dev, "Create irq workqueue failed!\n");
                ret = -ENOMEM;
-               goto err_request_irq_fail;
+               goto err_create_wq_fail;
        }
 
        return 0;
 
+err_create_wq_fail:
+       __hns_roce_free_irq(hr_dev);
+
 err_request_irq_fail:
-       for (k -= 1; k >= 0; k--)
-               if (k < other_num)
-                       free_irq(hr_dev->irq[k], hr_dev);
-               else
-                       free_irq(eq_table->eq[k - other_num].irq,
-                                &eq_table->eq[k - other_num]);
+       hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
 
 err_create_eq_fail:
-       for (j -= 1; j >= 0; j--)
-               hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
-
-err_failed_kzalloc:
        for (i -= 1; i >= 0; i--)
-               kfree(hr_dev->irq_names[i]);
+               hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
        kfree(eq_table->eq);
 
        return ret;
@@ -5837,30 +5916,22 @@ err_failed_kzalloc:
 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
 {
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
-       int irq_num;
        int eq_num;
        int i;
 
        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
-       irq_num = eq_num + hr_dev->caps.num_other_vectors;
 
        /* Disable irq */
        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
 
-       for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
-               free_irq(hr_dev->irq[i], hr_dev);
+       __hns_roce_free_irq(hr_dev);
 
        for (i = 0; i < eq_num; i++) {
                hns_roce_v2_destroy_eqc(hr_dev, i);
 
-               free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
-
                hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
        }
 
-       for (i = 0; i < irq_num; i++)
-               kfree(hr_dev->irq_names[i]);
-
        kfree(eq_table->eq);
 
        flush_workqueue(hr_dev->irq_workq);
index 478f5a5b7aa1da0481bafe7b915f6c2ccfa9328c..58931b5399f89f385ec8e83ccd1416cf1b2e20f9 100644 (file)
 #define HNS_ROCE_CMD_FLAG_ERR_INTR     BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
 
 #define HNS_ROCE_CMQ_DESC_NUM_S                3
-#define HNS_ROCE_CMQ_EN_B              16
-#define HNS_ROCE_CMQ_ENABLE            BIT(HNS_ROCE_CMQ_EN_B)
 
 #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT          5
 
index 1e4ba48f56136e2f4a2c9371ecdb13e33894f8eb..1b757cc924c3d3fed29a13fee45d0d11cc5e206f 100644 (file)
@@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
        props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state = (props->state == IB_PORT_ACTIVE) ?
-                            HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED;
+                            IB_PORT_PHYS_STATE_LINK_UP :
+                            IB_PORT_PHYS_STATE_DISABLED;
 
        spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
 
index 549e1a38dfe0be1ff41a78650695c6495205a605..8157679021b9362de880c92476692c66747e9915 100644 (file)
@@ -347,155 +347,207 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
        mr->pbl_bt_l0 = NULL;
        mr->pbl_l0_dma_addr = 0;
 }
+
+static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+                              struct hns_roce_mr *mr, u32 pbl_bt_sz)
+{
+       struct device *dev = hr_dev->dev;
 
-/* PBL multi hop addressing */
-static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
-                              struct hns_roce_mr *mr)
+       if (npages > pbl_bt_sz / 8) {
+               dev_err(dev, "npages %d is larger than buf_pg_sz!",
+                       npages);
+               return -EINVAL;
+       }
+       mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+                                        &(mr->pbl_dma_addr),
+                                        GFP_KERNEL);
+       if (!mr->pbl_buf)
+               return -ENOMEM;
+
+       mr->pbl_size = npages;
+       mr->pbl_ba = mr->pbl_dma_addr;
+       mr->pbl_hop_num = 1;
+       mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+       mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+       return 0;
+}
+
+static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+                              struct hns_roce_mr *mr, u32 pbl_bt_sz)
 {
        struct device *dev = hr_dev->dev;
-       int mr_alloc_done = 0;
        int npages_allocated;
-       int i = 0, j = 0;
-       u32 pbl_bt_sz;
-       u32 mhop_num;
        u64 pbl_last_bt_num;
        u64 pbl_bt_cnt = 0;
-       u64 bt_idx;
        u64 size;
+       int i;
 
-       mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
-       pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
        pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
 
-       if (mhop_num == HNS_ROCE_HOP_NUM_0)
-               return 0;
-
-       /* hop_num = 1 */
-       if (mhop_num == 1) {
-               if (npages > pbl_bt_sz / 8) {
-                       dev_err(dev, "npages %d is larger than buf_pg_sz!",
-                               npages);
-                       return -EINVAL;
+       /* alloc L1 BT */
+       for (i = 0; i < pbl_bt_sz / 8; i++) {
+               if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+                       size = pbl_bt_sz;
+               } else {
+                       npages_allocated = i * (pbl_bt_sz / 8);
+                       size = (npages - npages_allocated) * 8;
                }
-               mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-                                                &(mr->pbl_dma_addr),
-                                                GFP_KERNEL);
-               if (!mr->pbl_buf)
+               mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+                                           &(mr->pbl_l1_dma_addr[i]),
+                                           GFP_KERNEL);
+               if (!mr->pbl_bt_l1[i]) {
+                       hns_roce_loop_free(hr_dev, mr, 1, i, 0);
                        return -ENOMEM;
+               }
 
-               mr->pbl_size = npages;
-               mr->pbl_ba = mr->pbl_dma_addr;
-               mr->pbl_hop_num = mhop_num;
-               mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
-               mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-               return 0;
+               *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+               pbl_bt_cnt++;
+               if (pbl_bt_cnt >= pbl_last_bt_num)
+                       break;
        }
 
-       mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
-                                     sizeof(*mr->pbl_l1_dma_addr),
+       mr->l0_chunk_last_num = i + 1;
+
+       return 0;
+}
+
+static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
+                              struct hns_roce_mr *mr, u32 pbl_bt_sz)
+{
+       struct device *dev = hr_dev->dev;
+       int mr_alloc_done = 0;
+       int npages_allocated;
+       u64 pbl_last_bt_num;
+       u64 pbl_bt_cnt = 0;
+       u64 bt_idx;
+       u64 size;
+       int i;
+       int j = 0;
+
+       pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+       mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+                                     sizeof(*mr->pbl_l2_dma_addr),
                                      GFP_KERNEL);
-       if (!mr->pbl_l1_dma_addr)
+       if (!mr->pbl_l2_dma_addr)
                return -ENOMEM;
 
-       mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+       mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+                               sizeof(*mr->pbl_bt_l2),
                                GFP_KERNEL);
-       if (!mr->pbl_bt_l1)
-               goto err_kcalloc_bt_l1;
-
-       if (mhop_num == 3) {
-               mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
-                                             sizeof(*mr->pbl_l2_dma_addr),
-                                             GFP_KERNEL);
-               if (!mr->pbl_l2_dma_addr)
-                       goto err_kcalloc_l2_dma;
+       if (!mr->pbl_bt_l2)
+               goto err_kcalloc_bt_l2;
+
+       /* alloc L1, L2 BT */
+       for (i = 0; i < pbl_bt_sz / 8; i++) {
+               mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+                                           &(mr->pbl_l1_dma_addr[i]),
+                                           GFP_KERNEL);
+               if (!mr->pbl_bt_l1[i]) {
+                       hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+                       goto err_dma_alloc_l0;
+               }
 
-               mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
-                                       sizeof(*mr->pbl_bt_l2),
-                                       GFP_KERNEL);
-               if (!mr->pbl_bt_l2)
-                       goto err_kcalloc_bt_l2;
-       }
+               *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
 
-       /* alloc L0 BT */
-       mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
-                                          &(mr->pbl_l0_dma_addr),
-                                          GFP_KERNEL);
-       if (!mr->pbl_bt_l0)
-               goto err_dma_alloc_l0;
+               for (j = 0; j < pbl_bt_sz / 8; j++) {
+                       bt_idx = i * pbl_bt_sz / 8 + j;
 
-       if (mhop_num == 2) {
-               /* alloc L1 BT */
-               for (i = 0; i < pbl_bt_sz / 8; i++) {
                        if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
                                size = pbl_bt_sz;
                        } else {
-                               npages_allocated = i * (pbl_bt_sz / 8);
+                               npages_allocated = bt_idx *
+                                                  (pbl_bt_sz / 8);
                                size = (npages - npages_allocated) * 8;
                        }
-                       mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
-                                                   &(mr->pbl_l1_dma_addr[i]),
-                                                   GFP_KERNEL);
-                       if (!mr->pbl_bt_l1[i]) {
-                               hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+                       mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+                                     dev, size,
+                                     &(mr->pbl_l2_dma_addr[bt_idx]),
+                                     GFP_KERNEL);
+                       if (!mr->pbl_bt_l2[bt_idx]) {
+                               hns_roce_loop_free(hr_dev, mr, 2, i, j);
                                goto err_dma_alloc_l0;
                        }
 
-                       *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+                       *(mr->pbl_bt_l1[i] + j) =
+                                       mr->pbl_l2_dma_addr[bt_idx];
 
                        pbl_bt_cnt++;
-                       if (pbl_bt_cnt >= pbl_last_bt_num)
+                       if (pbl_bt_cnt >= pbl_last_bt_num) {
+                               mr_alloc_done = 1;
                                break;
-               }
-       } else if (mhop_num == 3) {
-               /* alloc L1, L2 BT */
-               for (i = 0; i < pbl_bt_sz / 8; i++) {
-                       mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
-                                                   &(mr->pbl_l1_dma_addr[i]),
-                                                   GFP_KERNEL);
-                       if (!mr->pbl_bt_l1[i]) {
-                               hns_roce_loop_free(hr_dev, mr, 1, i, 0);
-                               goto err_dma_alloc_l0;
                        }
+               }
 
-                       *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+               if (mr_alloc_done)
+                       break;
+       }
 
-                       for (j = 0; j < pbl_bt_sz / 8; j++) {
-                               bt_idx = i * pbl_bt_sz / 8 + j;
+       mr->l0_chunk_last_num = i + 1;
+       mr->l1_chunk_last_num = j + 1;
 
-                               if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
-                                       size = pbl_bt_sz;
-                               } else {
-                                       npages_allocated = bt_idx *
-                                                          (pbl_bt_sz / 8);
-                                       size = (npages - npages_allocated) * 8;
-                               }
-                               mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
-                                             dev, size,
-                                             &(mr->pbl_l2_dma_addr[bt_idx]),
-                                             GFP_KERNEL);
-                               if (!mr->pbl_bt_l2[bt_idx]) {
-                                       hns_roce_loop_free(hr_dev, mr, 2, i, j);
-                                       goto err_dma_alloc_l0;
-                               }
 
-                               *(mr->pbl_bt_l1[i] + j) =
-                                               mr->pbl_l2_dma_addr[bt_idx];
+       return 0;
 
-                               pbl_bt_cnt++;
-                               if (pbl_bt_cnt >= pbl_last_bt_num) {
-                                       mr_alloc_done = 1;
-                                       break;
-                               }
-                       }
+err_dma_alloc_l0:
+       kfree(mr->pbl_bt_l2);
+       mr->pbl_bt_l2 = NULL;
 
-                       if (mr_alloc_done)
-                               break;
-               }
+err_kcalloc_bt_l2:
+       kfree(mr->pbl_l2_dma_addr);
+       mr->pbl_l2_dma_addr = NULL;
+
+       return -ENOMEM;
+}
+
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+                              struct hns_roce_mr *mr)
+{
+       struct device *dev = hr_dev->dev;
+       u32 pbl_bt_sz;
+       u32 mhop_num;
+
+       mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
+       pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+
+       if (mhop_num == HNS_ROCE_HOP_NUM_0)
+               return 0;
+
+       if (mhop_num == 1)
+               return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
+
+       mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+                                     sizeof(*mr->pbl_l1_dma_addr),
+                                     GFP_KERNEL);
+       if (!mr->pbl_l1_dma_addr)
+               return -ENOMEM;
+
+       mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+                               GFP_KERNEL);
+       if (!mr->pbl_bt_l1)
+               goto err_kcalloc_bt_l1;
+
+       /* alloc L0 BT */
+       mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+                                          &(mr->pbl_l0_dma_addr),
+                                          GFP_KERNEL);
+       if (!mr->pbl_bt_l0)
+               goto err_kcalloc_l2_dma;
+
+       if (mhop_num == 2) {
+               if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
+                       goto err_kcalloc_l2_dma;
+       }
+
+       if (mhop_num == 3) {
+               if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
+                       goto err_kcalloc_l2_dma;
        }
 
-       mr->l0_chunk_last_num = i + 1;
-       if (mhop_num == 3)
-               mr->l1_chunk_last_num = j + 1;
 
        mr->pbl_size = npages;
        mr->pbl_ba = mr->pbl_l0_dma_addr;
@@ -505,14 +557,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
 
        return 0;
 
-err_dma_alloc_l0:
-       kfree(mr->pbl_bt_l2);
-       mr->pbl_bt_l2 = NULL;
-
-err_kcalloc_bt_l2:
-       kfree(mr->pbl_l2_dma_addr);
-       mr->pbl_l2_dma_addr = NULL;
-
 err_kcalloc_l2_dma:
        kfree(mr->pbl_bt_l1);
        mr->pbl_bt_l1 = NULL;
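
The hunk above splits the old monolithic multi-hop allocator into per-hop helpers (pbl_1hop_alloc/pbl_2hop_alloc/pbl_3hop_alloc), all of which share the same base-address-table (BT) sizing arithmetic: a BT page of pbl_bt_sz bytes holds pbl_bt_sz/8 64-bit entries, and only the final table is trimmed to the pages that remain. A minimal userspace sketch of that math, using example values rather than driver data:

#include <stdio.h>

int main(void)
{
	unsigned long pbl_bt_sz = 4096;           /* assumed 4 KiB BT page */
	unsigned long entries = pbl_bt_sz / 8;    /* 64-bit DMA addresses per table */
	unsigned long npages = 1000;              /* MR pages to map (example) */

	unsigned long last_bt_num = (npages + entries - 1) / entries; /* tables needed */
	unsigned long npages_allocated = (last_bt_num - 1) * entries; /* covered by full tables */
	unsigned long last_size = (npages - npages_allocated) * 8;    /* bytes in the last table */

	printf("tables=%lu last_size=%lu bytes\n", last_bt_num, last_size);
	return 0;
}

With 4 KiB tables and 1000 pages this yields two tables, the second holding the 488 remaining entries (3904 bytes), which is exactly the `size = (npages - npages_allocated) * 8` branch in the allocation loops.
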
@@ -1161,6 +1205,83 @@ err_free:
        return ERR_PTR(ret);
 }
 
+static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
+                         u64 start, u64 length,
+                         u64 virt_addr, int mr_access_flags,
+                         struct hns_roce_cmd_mailbox *mailbox,
+                         u32 pdn, struct ib_udata *udata)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+       struct hns_roce_mr *mr = to_hr_mr(ibmr);
+       struct device *dev = hr_dev->dev;
+       int npages;
+       int ret;
+
+       if (mr->size != ~0ULL) {
+               npages = ib_umem_page_count(mr->umem);
+
+               if (hr_dev->caps.pbl_hop_num)
+                       hns_roce_mhop_free(hr_dev, mr);
+               else
+                       dma_free_coherent(dev, npages * 8,
+                                         mr->pbl_buf, mr->pbl_dma_addr);
+       }
+       ib_umem_release(mr->umem);
+
+       mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+       if (IS_ERR(mr->umem)) {
+               ret = PTR_ERR(mr->umem);
+               mr->umem = NULL;
+               return -ENOMEM;
+       }
+       npages = ib_umem_page_count(mr->umem);
+
+       if (hr_dev->caps.pbl_hop_num) {
+               ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+               if (ret)
+                       goto release_umem;
+       } else {
+               mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+                                                &(mr->pbl_dma_addr),
+                                                GFP_KERNEL);
+               if (!mr->pbl_buf) {
+                       ret = -ENOMEM;
+                       goto release_umem;
+               }
+       }
+
+       ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+                                          mr_access_flags, virt_addr,
+                                          length, mailbox->buf);
+       if (ret)
+               goto release_umem;
+
+
+       ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+       if (ret) {
+               if (mr->size != ~0ULL) {
+                       npages = ib_umem_page_count(mr->umem);
+
+                       if (hr_dev->caps.pbl_hop_num)
+                               hns_roce_mhop_free(hr_dev, mr);
+                       else
+                               dma_free_coherent(dev, npages * 8,
+                                                 mr->pbl_buf,
+                                                 mr->pbl_dma_addr);
+               }
+
+               goto release_umem;
+       }
+
+       return 0;
+
+release_umem:
+       ib_umem_release(mr->umem);
+       return ret;
+
+}
+
+
 int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
                           u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
                           struct ib_udata *udata)
@@ -1171,7 +1292,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
        struct device *dev = hr_dev->dev;
        unsigned long mtpt_idx;
        u32 pdn = 0;
-       int npages;
        int ret;
 
        if (!mr->enabled)
@@ -1198,73 +1318,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
                pdn = to_hr_pd(pd)->pdn;
 
        if (flags & IB_MR_REREG_TRANS) {
-               if (mr->size != ~0ULL) {
-                       npages = ib_umem_page_count(mr->umem);
-
-                       if (hr_dev->caps.pbl_hop_num)
-                               hns_roce_mhop_free(hr_dev, mr);
-                       else
-                               dma_free_coherent(dev, npages * 8, mr->pbl_buf,
-                                                 mr->pbl_dma_addr);
-               }
-               ib_umem_release(mr->umem);
-
-               mr->umem =
-                       ib_umem_get(udata, start, length, mr_access_flags, 0);
-               if (IS_ERR(mr->umem)) {
-                       ret = PTR_ERR(mr->umem);
-                       mr->umem = NULL;
+               ret = rereg_mr_trans(ibmr, flags,
+                                    start, length,
+                                    virt_addr, mr_access_flags,
+                                    mailbox, pdn, udata);
+               if (ret)
                        goto free_cmd_mbox;
-               }
-               npages = ib_umem_page_count(mr->umem);
-
-               if (hr_dev->caps.pbl_hop_num) {
-                       ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
-                       if (ret)
-                               goto release_umem;
-               } else {
-                       mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-                                                        &(mr->pbl_dma_addr),
-                                                        GFP_KERNEL);
-                       if (!mr->pbl_buf) {
-                               ret = -ENOMEM;
-                               goto release_umem;
-                       }
-               }
-       }
-
-       ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
-                                          mr_access_flags, virt_addr,
-                                          length, mailbox->buf);
-       if (ret) {
-               if (flags & IB_MR_REREG_TRANS)
-                       goto release_umem;
-               else
+       } else {
+               ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+                                                  mr_access_flags, virt_addr,
+                                                  length, mailbox->buf);
+               if (ret)
                        goto free_cmd_mbox;
        }
 
-       if (flags & IB_MR_REREG_TRANS) {
-               ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
-               if (ret) {
-                       if (mr->size != ~0ULL) {
-                               npages = ib_umem_page_count(mr->umem);
-
-                               if (hr_dev->caps.pbl_hop_num)
-                                       hns_roce_mhop_free(hr_dev, mr);
-                               else
-                                       dma_free_coherent(dev, npages * 8,
-                                                         mr->pbl_buf,
-                                                         mr->pbl_dma_addr);
-                       }
-
-                       goto release_umem;
-               }
-       }
-
        ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
        if (ret) {
                dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
-               goto release_umem;
+               ib_umem_release(mr->umem);
+               goto free_cmd_mbox;
        }
 
        mr->enabled = 1;
@@ -1275,9 +1347,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
 
        return 0;
 
-release_umem:
-       ib_umem_release(mr->umem);
-
 free_cmd_mbox:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
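
With the new helper, hns_roce_rereg_user_mr() becomes a dispatcher: when IB_MR_REREG_TRANS is set, rereg_mr_trans() frees the old PBL, re-pins the user buffer, rebuilds the PBL and rewrites the MTPT; otherwise only rereg_write_mtpt() runs and the buffer is left untouched. A toy model of that dispatch (the stub helpers and the flag value are placeholders for this sketch, not driver or verbs definitions):

#include <stdio.h>

#define REREG_TRANS 0x1   /* stand-in for IB_MR_REREG_TRANS in this sketch only */

static int rereg_translation(void) { return 0; }  /* re-pin umem, rebuild PBL, rewrite MTPT */
static int rewrite_mtpt_only(void) { return 0; }  /* access/PD change, buffer untouched */

static int rereg_dispatch(int flags)
{
	if (flags & REREG_TRANS)
		return rereg_translation();
	return rewrite_mtpt_only();
}

int main(void)
{
	printf("%d %d\n", rereg_dispatch(REREG_TRANS), rereg_dispatch(0));
	return 0;
}
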
 
index e0424029b058b1d145209f2adaccda99ac2334c2..ba8176869f51fbb2673b70c05af1d81a1d8e8cae 100644
@@ -324,31 +324,46 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
        return 0;
 }
 
-static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
-                                    struct ib_qp_cap *cap,
-                                    struct hns_roce_qp *hr_qp,
-                                    struct hns_roce_ib_create_qp *ucmd)
+static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+                                       struct ib_qp_cap *cap,
+                                       struct hns_roce_ib_create_qp *ucmd)
 {
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);
-       u32 ex_sge_num;
-       u32 page_size;
-       u32 max_cnt;
 
        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
             ucmd->log_sq_stride > max_sq_stride ||
             ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-               dev_err(hr_dev->dev, "check SQ size error!\n");
+               ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
                return -EINVAL;
        }
 
        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
-               dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
-                       cap->max_send_sge);
+               ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
+                         cap->max_send_sge);
                return -EINVAL;
        }
 
+       return 0;
+}
+
+static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+                                    struct ib_qp_cap *cap,
+                                    struct hns_roce_qp *hr_qp,
+                                    struct hns_roce_ib_create_qp *ucmd)
+{
+       u32 ex_sge_num;
+       u32 page_size;
+       u32 max_cnt;
+       int ret;
+
+       ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
+       if (ret) {
+               ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
+               return ret;
+       }
+
        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
 
@@ -501,6 +516,35 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
        return bt_pg_shift - PAGE_SHIFT;
 }
 
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
+                               struct hns_roce_qp *hr_qp)
+{
+       struct device *dev = hr_dev->dev;
+
+       if (hr_qp->sq.max_gs > 2) {
+               hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+                                    (hr_qp->sq.max_gs - 2));
+               hr_qp->sge.sge_shift = 4;
+       }
+
+       /* ud sqwqe's sge use extend sge */
+       if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+               hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+                                    hr_qp->sq.max_gs);
+               hr_qp->sge.sge_shift = 4;
+       }
+
+       if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+               if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+                       dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
+                               hr_qp->sge.sge_cnt);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
@@ -509,6 +553,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
        u32 page_size;
        u32 max_cnt;
        int size;
+       int ret;
 
        if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -518,8 +563,6 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
        }
 
        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-       hr_qp->sq_max_wqes_per_wr = 1;
-       hr_qp->sq_spare_wqes = 0;
 
        if (hr_dev->caps.min_wqes)
                max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
@@ -539,25 +582,10 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
        else
                hr_qp->sq.max_gs = max_cnt;
 
-       if (hr_qp->sq.max_gs > 2) {
-               hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-                                    (hr_qp->sq.max_gs - 2));
-               hr_qp->sge.sge_shift = 4;
-       }
-
-       /* ud sqwqe's sge use extend sge */
-       if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-               hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
-                                    hr_qp->sq.max_gs);
-               hr_qp->sge.sge_shift = 4;
-       }
-
-       if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
-               if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
-                       dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
-                               hr_qp->sge.sge_cnt);
-                       return -EINVAL;
-               }
+       ret = set_extend_sge_param(hr_dev, hr_qp);
+       if (ret) {
+               dev_err(dev, "set extend sge parameters fail\n");
+               return ret;
        }
 
        /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
@@ -826,11 +854,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 
                hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
                                         GFP_KERNEL);
-               hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
-                                        GFP_KERNEL);
-               if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+               if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
                        ret = -ENOMEM;
-                       goto err_wrid;
+                       goto err_get_bufs;
+               }
+
+               if (hr_qp->rq.wqe_cnt) {
+                       hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
+                                                GFP_KERNEL);
+                       if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
+                               ret = -ENOMEM;
+                               goto err_sq_wrid;
+                       }
                }
        }
 
@@ -916,8 +951,8 @@ err_wrid:
                    hns_roce_qp_has_rq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
        } else {
-               kfree(hr_qp->sq.wrid);
-               kfree(hr_qp->rq.wrid);
+               if (hr_qp->rq.wqe_cnt)
+                       kfree(hr_qp->rq.wrid);
        }
 
 err_sq_dbmap:
@@ -928,6 +963,10 @@ err_sq_dbmap:
                    hns_roce_qp_has_sq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
 
+err_sq_wrid:
+       if (!udata)
+               kfree(hr_qp->sq.wrid);
+
 err_get_bufs:
        hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
 
@@ -942,11 +981,13 @@ err_db:
                hns_roce_free_db(hr_dev, &hr_qp->rdb);
 
 err_rq_sge_list:
-       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+       if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+            hns_roce_qp_has_rq(init_attr))
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
 
 err_wqe_list:
-       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+       if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+            hns_roce_qp_has_rq(init_attr))
                kfree(hr_qp->rq_inl_buf.wqe_list);
 
 err_out:
@@ -958,7 +999,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-       struct device *dev = hr_dev->dev;
+       struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;
@@ -972,7 +1013,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
-                       dev_err(dev, "Create RC QP failed\n");
+                       ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+                                 hr_qp->qpn, ret);
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }
@@ -984,7 +1026,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (udata) {
-                       dev_err(dev, "not support usr space GSI\n");
+                       ibdev_err(ibdev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }
 
@@ -1006,7 +1048,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
-                       dev_err(dev, "Create GSI QP failed!\n");
+                       ibdev_err(ibdev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }
@@ -1014,7 +1056,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                break;
        }
        default:{
-               dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
+               ibdev_err(ibdev, "not support QP type %d\n",
+                         init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }
@@ -1040,23 +1083,88 @@ int to_hr_qp_type(int qp_type)
        return transport_type;
 }
 
+static int check_mtu_validate(struct hns_roce_dev *hr_dev,
+                             struct hns_roce_qp *hr_qp,
+                             struct ib_qp_attr *attr, int attr_mask)
+{
+       enum ib_mtu active_mtu;
+       int p;
+
+       p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+       active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+       if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
+           attr->path_mtu > hr_dev->caps.max_mtu) ||
+           attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
+               ibdev_err(&hr_dev->ib_dev,
+                       "attr path_mtu(%d)invalid while modify qp",
+                       attr->path_mtu);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                                 int attr_mask)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+       int p;
+
+       if ((attr_mask & IB_QP_PORT) &&
+           (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+               ibdev_err(&hr_dev->ib_dev,
+                       "attr port_num invalid.attr->port_num=%d\n",
+                       attr->port_num);
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_QP_PKEY_INDEX) {
+               p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+               if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+                       ibdev_err(&hr_dev->ib_dev,
+                               "attr pkey_index invalid.attr->pkey_index=%d\n",
+                               attr->pkey_index);
+                       return -EINVAL;
+               }
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+           attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+               ibdev_err(&hr_dev->ib_dev,
+                       "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+                       attr->max_rd_atomic);
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+           attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+               ibdev_err(&hr_dev->ib_dev,
+                       "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+                       attr->max_dest_rd_atomic);
+               return -EINVAL;
+       }
+
+       if (attr_mask & IB_QP_PATH_MTU)
+               return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
+
+       return 0;
+}
+
 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
-       struct device *dev = hr_dev->dev;
        int ret = -EINVAL;
-       int p;
-       enum ib_mtu active_mtu;
 
        mutex_lock(&hr_qp->mutex);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
-       new_state = attr_mask & IB_QP_STATE ?
-                   attr->qp_state : cur_state;
+       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
@@ -1066,67 +1174,27 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        if (hr_qp->rdb_en == 1)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
-                       dev_warn(dev, "flush cqe is not supported in userspace!\n");
+                       ibdev_warn(&hr_dev->ib_dev,
+                                 "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }
 
        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
-               dev_err(dev, "ib_modify_qp_is_ok failed\n");
+               ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }
 
-       if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
-               dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
-                       attr->port_num);
-               goto out;
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX) {
-               p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
-               if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
-                       dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
-                               attr->pkey_index);
-                       goto out;
-               }
-       }
-
-       if (attr_mask & IB_QP_PATH_MTU) {
-               p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
-               active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
-
-               if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
-                   attr->path_mtu > IB_MTU_4096) ||
-                   (hr_dev->caps.max_mtu == IB_MTU_2048 &&
-                   attr->path_mtu > IB_MTU_2048) ||
-                   attr->path_mtu < IB_MTU_256 ||
-                   attr->path_mtu > active_mtu) {
-                       dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
-                               attr->path_mtu);
-                       goto out;
-               }
-       }
-
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
-               dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
-                       attr->max_rd_atomic);
-               goto out;
-       }
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
-               dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
-                       attr->max_dest_rd_atomic);
+       ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
+       if (ret)
                goto out;
-       }
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                if (hr_dev->caps.min_wqes) {
                        ret = -EPERM;
-                       dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+                       ibdev_err(&hr_dev->ib_dev,
+                               "cur_state=%d new_state=%d\n", cur_state,
                                new_state);
                } else {
                        ret = 0;
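
set_extend_sge_param(), factored out above, keeps the original sizing rule: when a WQE needs more than two SGEs, the extra ones live in an extended-SGE area of roundup_pow_of_two(wqe_cnt * (max_gs - 2)) entries of 2^4 = 16 bytes, and on PCI revision 0x20 hardware the result is checked against caps.max_extend_sg. A small sketch of that computation (the cap value is an assumption for illustration, not a stated hardware limit):

#include <stdio.h>

/* Round v up to the next power of two (v > 0); mirrors roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int wqe_cnt = 128, max_gs = 6;       /* example queue parameters */
	unsigned int sge_cnt = roundup_pow2(wqe_cnt * (max_gs - 2));
	unsigned int max_extend_sg = 0x200000;        /* assumed cap for the sketch */

	printf("sge_cnt=%u %s\n", sge_cnt,
	       sge_cnt > max_extend_sg ? "rejected" : "ok");
	return 0;
}
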
index 38bb548eaa6d8047b2b996c5396bc20f1da51c2d..c011422112b2b31388f5a34f4ed893a8d3920a2b 100644
@@ -175,6 +175,91 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
        hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
 }
 
+static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
+                          int srq_buf_size)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+       struct hns_roce_ib_create_srq  ucmd;
+       u32 page_shift;
+       u32 npages;
+       int ret;
+
+       if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+               return -EFAULT;
+
+       srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+       if (IS_ERR(srq->umem))
+               return PTR_ERR(srq->umem);
+
+       if (hr_dev->caps.srqwqe_buf_pg_sz) {
+               npages = (ib_umem_page_count(srq->umem) +
+                        (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+                        (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+               page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+               ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+       } else
+               ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->umem),
+                                       PAGE_SHIFT, &srq->mtt);
+       if (ret)
+               goto err_user_buf;
+
+       ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
+       if (ret)
+               goto err_user_srq_mtt;
+
+       /* config index queue BA */
+       srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
+                                       srq->idx_que.buf_size, 0, 0);
+       if (IS_ERR(srq->idx_que.umem)) {
+               dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
+               ret = PTR_ERR(srq->idx_que.umem);
+               goto err_user_srq_mtt;
+       }
+
+       if (hr_dev->caps.idx_buf_pg_sz) {
+               npages = (ib_umem_page_count(srq->idx_que.umem) +
+                        (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
+                        (1 << hr_dev->caps.idx_buf_pg_sz);
+               page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+               ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
+                                       &srq->idx_que.mtt);
+       } else {
+               ret = hns_roce_mtt_init(hr_dev,
+                                       ib_umem_page_count(srq->idx_que.umem),
+                                       PAGE_SHIFT,
+                                       &srq->idx_que.mtt);
+       }
+
+       if (ret) {
+               dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
+               goto err_user_idx_mtt;
+       }
+
+       ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
+                                        srq->idx_que.umem);
+       if (ret) {
+               dev_err(hr_dev->dev,
+                       "hns_roce_ib_umem_write_mtt error for idx que\n");
+               goto err_user_idx_buf;
+       }
+
+       return 0;
+
+err_user_idx_buf:
+       hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+
+err_user_idx_mtt:
+       ib_umem_release(srq->idx_que.umem);
+
+err_user_srq_mtt:
+       hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+
+err_user_buf:
+       ib_umem_release(srq->umem);
+
+       return ret;
+}
+
 static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
                                   u32 page_shift)
 {
@@ -196,6 +281,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
        return 0;
 }
 
+static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+       u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+       int ret;
+
+       if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
+                              &srq->buf, page_shift))
+               return -ENOMEM;
+
+       srq->head = 0;
+       srq->tail = srq->max - 1;
+
+       ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
+                               &srq->mtt);
+       if (ret)
+               goto err_kernel_buf;
+
+       ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
+       if (ret)
+               goto err_kernel_srq_mtt;
+
+       page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+       ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
+       if (ret) {
+               dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
+               goto err_kernel_srq_mtt;
+       }
+
+       /* Init mtt table for idx_que */
+       ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
+                               srq->idx_que.idx_buf.page_shift,
+                               &srq->idx_que.mtt);
+       if (ret)
+               goto err_kernel_create_idx;
+
+       /* Write buffer address into the mtt table */
+       ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
+                                    &srq->idx_que.idx_buf);
+       if (ret)
+               goto err_kernel_idx_buf;
+
+       srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+       if (!srq->wrid) {
+               ret = -ENOMEM;
+               goto err_kernel_idx_buf;
+       }
+
+       return 0;
+
+err_kernel_idx_buf:
+       hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+
+err_kernel_create_idx:
+       hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
+                         &srq->idx_que.idx_buf);
+       kfree(srq->idx_que.bitmap);
+
+err_kernel_srq_mtt:
+       hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+
+err_kernel_buf:
+       hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+
+       return ret;
+}
+
+static void destroy_user_srq(struct hns_roce_dev *hr_dev,
+                            struct hns_roce_srq *srq)
+{
+       hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+       ib_umem_release(srq->idx_que.umem);
+       hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+       ib_umem_release(srq->umem);
+}
+
+static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
+                              struct hns_roce_srq *srq, int srq_buf_size)
+{
+       kvfree(srq->wrid);
+       hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
+       hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
+       kfree(srq->idx_que.bitmap);
+       hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
+       hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+}
+
 int hns_roce_create_srq(struct ib_srq *ib_srq,
                        struct ib_srq_init_attr *srq_init_attr,
                        struct ib_udata *udata)
@@ -205,9 +377,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        struct hns_roce_srq *srq = to_hr_srq(ib_srq);
        int srq_desc_size;
        int srq_buf_size;
-       u32 page_shift;
        int ret = 0;
-       u32 npages;
        u32 cqn;
 
        /* Check the actual SRQ wqe and SRQ sge num */
@@ -233,115 +403,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
 
        if (udata) {
-               struct hns_roce_ib_create_srq  ucmd;
-
-               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
-                       return -EFAULT;
-
-               srq->umem =
-                       ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
-               if (IS_ERR(srq->umem))
-                       return PTR_ERR(srq->umem);
-
-               if (hr_dev->caps.srqwqe_buf_pg_sz) {
-                       npages = (ib_umem_page_count(srq->umem) +
-                                 (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
-                                 (1 << hr_dev->caps.srqwqe_buf_pg_sz);
-                       page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
-                       ret = hns_roce_mtt_init(hr_dev, npages,
-                                               page_shift,
-                                               &srq->mtt);
-               } else
-                       ret = hns_roce_mtt_init(hr_dev,
-                                               ib_umem_page_count(srq->umem),
-                                               PAGE_SHIFT, &srq->mtt);
-               if (ret)
-                       goto err_buf;
-
-               ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
-               if (ret)
-                       goto err_srq_mtt;
-
-               /* config index queue BA */
-               srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
-                                               srq->idx_que.buf_size, 0, 0);
-               if (IS_ERR(srq->idx_que.umem)) {
-                       dev_err(hr_dev->dev,
-                               "ib_umem_get error for index queue\n");
-                       ret = PTR_ERR(srq->idx_que.umem);
-                       goto err_srq_mtt;
-               }
-
-               if (hr_dev->caps.idx_buf_pg_sz) {
-                       npages = (ib_umem_page_count(srq->idx_que.umem) +
-                                 (1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
-                                 (1 << hr_dev->caps.idx_buf_pg_sz);
-                       page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
-                       ret = hns_roce_mtt_init(hr_dev, npages,
-                                               page_shift, &srq->idx_que.mtt);
-               } else {
-                       ret = hns_roce_mtt_init(
-                               hr_dev, ib_umem_page_count(srq->idx_que.umem),
-                               PAGE_SHIFT, &srq->idx_que.mtt);
-               }
-
-               if (ret) {
-                       dev_err(hr_dev->dev,
-                               "hns_roce_mtt_init error for idx que\n");
-                       goto err_idx_mtt;
-               }
-
-               ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
-                                                srq->idx_que.umem);
+               ret = create_user_srq(srq, udata, srq_buf_size);
                if (ret) {
-                       dev_err(hr_dev->dev,
-                             "hns_roce_ib_umem_write_mtt error for idx que\n");
-                       goto err_idx_buf;
+                       dev_err(hr_dev->dev, "Create user srq failed\n");
+                       goto err_srq;
                }
        } else {
-               page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
-               if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
-                                      (1 << page_shift) * 2, &srq->buf,
-                                      page_shift))
-                       return -ENOMEM;
-
-               srq->head = 0;
-               srq->tail = srq->max - 1;
-
-               ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
-                                       srq->buf.page_shift, &srq->mtt);
-               if (ret)
-                       goto err_buf;
-
-               ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
-               if (ret)
-                       goto err_srq_mtt;
-
-               page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
-               ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
+               ret = create_kernel_srq(srq, srq_buf_size);
                if (ret) {
-                       dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
-                               ret);
-                       goto err_srq_mtt;
-               }
-
-               /* Init mtt table for idx_que */
-               ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
-                                       srq->idx_que.idx_buf.page_shift,
-                                       &srq->idx_que.mtt);
-               if (ret)
-                       goto err_create_idx;
-
-               /* Write buffer address into the mtt table */
-               ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
-                                            &srq->idx_que.idx_buf);
-               if (ret)
-                       goto err_idx_buf;
-
-               srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
-               if (!srq->wrid) {
-                       ret = -ENOMEM;
-                       goto err_idx_buf;
+                       dev_err(hr_dev->dev, "Create kernel srq failed\n");
+                       goto err_srq;
                }
        }
 
@@ -373,27 +444,12 @@ err_srqc_alloc:
        hns_roce_srq_free(hr_dev, srq);
 
 err_wrid:
-       kvfree(srq->wrid);
-
-err_idx_buf:
-       hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_idx_mtt:
-       ib_umem_release(srq->idx_que.umem);
-
-err_create_idx:
-       hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
-                         &srq->idx_que.idx_buf);
-       bitmap_free(srq->idx_que.bitmap);
-
-err_srq_mtt:
-       hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_buf:
-       ib_umem_release(srq->umem);
-       if (!udata)
-               hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+       if (udata)
+               destroy_user_srq(hr_dev, srq);
+       else
+               destroy_kernel_srq(hr_dev, srq, srq_buf_size);
 
+err_srq:
        return ret;
 }
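
create_user_srq(), shown above, maps the user WQE buffer through the MTT at a coarser granularity when caps.srqwqe_buf_pg_sz is non-zero: the umem page count is divided by 2^srqwqe_buf_pg_sz and the MTT page shift grows by the same amount (the index-queue buffer follows the identical pattern with idx_buf_pg_sz). A sketch of that arithmetic with assumed example values:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB base pages for this sketch */

int main(void)
{
	unsigned int umem_pages = 37;   /* ib_umem_page_count() result (example) */
	unsigned int buf_pg_sz = 2;     /* caps.srqwqe_buf_pg_sz: 2^2 base pages per MTT page */

	unsigned int npages = (umem_pages + (1u << buf_pg_sz) - 1) / (1u << buf_pg_sz);
	unsigned int page_shift = PAGE_SHIFT + buf_pg_sz;

	printf("npages=%u page_shift=%u\n", npages, page_shift);
	return 0;
}
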
 
index d169a8031375c21dd02df8782f82216f78005df5..8056930bbe2c726e20bea2c0f92914f114b2f919 100644
@@ -97,18 +97,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
                            u8 port,
                            struct ib_port_attr *props)
 {
-       struct i40iw_device *iwdev = to_iwdev(ibdev);
-       struct net_device *netdev = iwdev->netdev;
-
-       /* props being zeroed by the caller, avoid zeroing it here */
-       props->max_mtu = IB_MTU_4096;
-       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
        props->lid = 1;
-       if (netif_carrier_ok(iwdev->netdev))
-               props->state = IB_PORT_ACTIVE;
-       else
-               props->state = IB_PORT_DOWN;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
index 8790101facb7b5676d90d77439f2151c48f2d362..8d2f1e38b891c0f438e08a714862eeb4433df8e4 100644
@@ -734,7 +734,8 @@ out:
 
 static u8 state_to_phys_state(enum ib_port_state state)
 {
-       return state == IB_PORT_ACTIVE ? 5 : 3;
+       return state == IB_PORT_ACTIVE ?
+               IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
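
The mlx4 hunk above replaces the magic numbers 5 and 3 with the named constants IB_PORT_PHYS_STATE_LINK_UP and IB_PORT_PHYS_STATE_DISABLED without changing behaviour. A self-contained model of the same mapping (the local enum values mirror the numbers visible in the hunk; the real constants come from the RDMA core headers):

#include <stdio.h>

enum { PORT_ACTIVE = 4 };                                  /* logical port state */
enum { PHYS_STATE_DISABLED = 3, PHYS_STATE_LINK_UP = 5 };  /* physical link state */

static int state_to_phys_state(int state)
{
	return state == PORT_ACTIVE ? PHYS_STATE_LINK_UP : PHYS_STATE_DISABLED;
}

int main(void)
{
	printf("%d %d\n", state_to_phys_state(PORT_ACTIVE), state_to_phys_state(0));
	return 0;
}
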
index 753479285ce92a5156314a028e02867154ea4e9c..6ae503cfc52640144cc7521f0d2fb67e9de80cfa 100644
@@ -377,6 +377,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
         * again
         */
        if (!ib_access_writable(access_flags)) {
+               unsigned long untagged_start = untagged_addr(start);
                struct vm_area_struct *vma;
 
                down_read(&current->mm->mmap_sem);
@@ -385,9 +386,9 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
                 * cover the memory, but for now it requires a single vma to
                 * entirely cover the MR to support RO mappings.
                 */
-               vma = find_vma(current->mm, start);
-               if (vma && vma->vm_end >= start + length &&
-                   vma->vm_start <= start) {
+               vma = find_vma(current->mm, untagged_start);
+               if (vma && vma->vm_end >= untagged_start + length &&
+                   vma->vm_start <= untagged_start) {
                        if (vma->vm_flags & VM_WRITE)
                                access_flags |= IB_ACCESS_LOCAL_WRITE;
                } else {
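
The mlx4_get_umem_mr() fix above runs find_vma() on an untagged copy of the user address so that pointers carrying a hardware tag (e.g. arm64 top-byte-ignore) still land in the VMA that backs the MR. A userspace illustration of what untagging means here (the mask is an assumption for the sketch; the kernel's untagged_addr() is architecture-defined):

#include <stdint.h>
#include <stdio.h>

/* Drop an arm64-style top-byte tag so the address can be compared
 * against VMA bounds. Illustrative mask only. */
static uint64_t untag(uint64_t addr)
{
	return addr & ~(0xffULL << 56);
}

int main(void)
{
	uint64_t tagged = (0x2aULL << 56) | 0x7f12345000ULL;

	printf("tagged=%#llx untagged=%#llx\n",
	       (unsigned long long)tagged, (unsigned long long)untag(tagged));
	return 0;
}
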
index 82aff2f2fdc23b1b9615c910a29a80e707db3cc1..bd4aa04416c6bcd50ed17aae6716208178d48e4d 100644
@@ -325,7 +325,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                      bool is_user, int has_rq, struct mlx4_ib_qp *qp,
+                      bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
                       u32 inl_recv_sz)
 {
        /* Sanity check RQ size before proceeding */
@@ -506,10 +506,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
        kfree(qp->sqp_proxy_rcv);
 }
 
-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
 {
        if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
-               return 0;
+               return false;
 
        return !attr->srq;
 }
@@ -855,12 +855,143 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
        mutex_unlock(&context->wqn_ranges_mutex);
 }
 
-static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
-                           enum mlx4_ib_source_type src,
-                           struct ib_qp_init_attr *init_attr,
+static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+                    struct ib_udata *udata, struct mlx4_ib_qp *qp)
+{
+       struct mlx4_ib_dev *dev = to_mdev(pd->device);
+       int qpn;
+       int err;
+       struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+               udata, struct mlx4_ib_ucontext, ibucontext);
+       struct mlx4_ib_cq *mcq;
+       unsigned long flags;
+       int range_size;
+       struct mlx4_ib_create_wq wq;
+       size_t copy_len;
+       int shift;
+       int n;
+
+       qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+
+       mutex_init(&qp->mutex);
+       spin_lock_init(&qp->sq.lock);
+       spin_lock_init(&qp->rq.lock);
+       INIT_LIST_HEAD(&qp->gid_list);
+       INIT_LIST_HEAD(&qp->steering_rules);
+
+       qp->state = IB_QPS_RESET;
+
+       copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+
+       if (ib_copy_from_udata(&wq, udata, copy_len)) {
+               err = -EFAULT;
+               goto err;
+       }
+
+       if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
+           wq.reserved[2]) {
+               pr_debug("user command isn't supported\n");
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+
+       if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+               pr_debug("WQN range size must be equal or smaller than %d\n",
+                        dev->dev->caps.max_rss_tbl_sz);
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+       range_size = 1 << wq.log_range_size;
+
+       if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
+               qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+
+       err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
+       if (err)
+               goto err;
+
+       qp->sq_no_prefetch = 1;
+       qp->sq.wqe_cnt = 1;
+       qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+       qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+                      (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+
+       qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+       if (IS_ERR(qp->umem)) {
+               err = PTR_ERR(qp->umem);
+               goto err;
+       }
+
+       n = ib_umem_page_count(qp->umem);
+       shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+       err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
+       if (err)
+               goto err_buf;
+
+       err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+       if (err)
+               goto err_mtt;
+
+       err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
+       if (err)
+               goto err_mtt;
+       qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+       err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
+       if (err)
+               goto err_wrid;
+
+       err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+       if (err)
+               goto err_qpn;
+
+       /*
+        * Hardware wants QPN written in big-endian order (after
+        * shifting) for send doorbell.  Precompute this value to save
+        * a little bit when posting sends.
+        */
+       qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+       qp->mqp.event = mlx4_ib_wq_event;
+
+       spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+       mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+                        to_mcq(init_attr->recv_cq));
+       /* Maintain device to QPs access, needed for further handling
+        * via reset flow
+        */
+       list_add_tail(&qp->qps_list, &dev->qp_list);
+       /* Maintain CQ to QPs access, needed for further handling
+        * via reset flow
+        */
+       mcq = to_mcq(init_attr->send_cq);
+       list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+       mcq = to_mcq(init_attr->recv_cq);
+       list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+       mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+                          to_mcq(init_attr->recv_cq));
+       spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+       return 0;
+
+err_qpn:
+       mlx4_ib_release_wqn(context, qp, 0);
+err_wrid:
+       mlx4_ib_db_unmap_user(context, &qp->db);
+
+err_mtt:
+       mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+err_buf:
+       ib_umem_release(qp->umem);
+err:
+       return err;
+}
+
+static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, int sqpn,
                            struct mlx4_ib_qp **caller_qp)
 {
+       struct mlx4_ib_dev *dev = to_mdev(pd->device);
        int qpn;
        int err;
        struct mlx4_ib_sqp *sqp = NULL;
@@ -870,7 +1001,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
        struct mlx4_ib_cq *mcq;
        unsigned long flags;
-       int range_size = 0;
 
        /* When tunneling special qps, we use a plain UD qp */
        if (sqpn) {
@@ -921,15 +1051,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        if (!sqp)
                                return -ENOMEM;
                        qp = &sqp->qp;
-                       qp->pri.vid = 0xFFFF;
-                       qp->alt.vid = 0xFFFF;
                } else {
                        qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
                        if (!qp)
                                return -ENOMEM;
-                       qp->pri.vid = 0xFFFF;
-                       qp->alt.vid = 0xFFFF;
                }
+               qp->pri.vid = 0xFFFF;
+               qp->alt.vid = 0xFFFF;
        } else
                qp = *caller_qp;
 
@@ -941,48 +1069,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        INIT_LIST_HEAD(&qp->gid_list);
        INIT_LIST_HEAD(&qp->steering_rules);
 
-       qp->state        = IB_QPS_RESET;
+       qp->state = IB_QPS_RESET;
        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
-
        if (udata) {
-               union {
-                       struct mlx4_ib_create_qp qp;
-                       struct mlx4_ib_create_wq wq;
-               } ucmd;
+               struct mlx4_ib_create_qp ucmd;
                size_t copy_len;
                int shift;
                int n;
 
-               copy_len = (src == MLX4_IB_QP_SRC) ?
-                          sizeof(struct mlx4_ib_create_qp) :
-                          min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+               copy_len = sizeof(struct mlx4_ib_create_qp);
 
                if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
                        err = -EFAULT;
                        goto err;
                }
 
-               if (src == MLX4_IB_RWQ_SRC) {
-                       if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
-                           ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
-                               pr_debug("user command isn't supported\n");
-                               err = -EOPNOTSUPP;
-                               goto err;
-                       }
-
-                       if (ucmd.wq.log_range_size >
-                           ilog2(dev->dev->caps.max_rss_tbl_sz)) {
-                               pr_debug("WQN range size must be equal or smaller than %d\n",
-                                        dev->dev->caps.max_rss_tbl_sz);
-                               err = -EOPNOTSUPP;
-                               goto err;
-                       }
-                       range_size = 1 << ucmd.wq.log_range_size;
-               } else {
-                       qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
-               }
+               qp->inl_recv_sz = ucmd.inl_recv_sz;
 
                if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
                        if (!(dev->dev->caps.flags &
@@ -1000,30 +1104,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                if (err)
                        goto err;
 
-               if (src == MLX4_IB_QP_SRC) {
-                       qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
+               qp->sq_no_prefetch = ucmd.sq_no_prefetch;
 
-                       err = set_user_sq_size(dev, qp,
-                                              (struct mlx4_ib_create_qp *)
-                                              &ucmd);
-                       if (err)
-                               goto err;
-               } else {
-                       qp->sq_no_prefetch = 1;
-                       qp->sq.wqe_cnt = 1;
-                       qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
-                       /* Allocated buffer expects to have at least that SQ
-                        * size.
-                        */
-                       qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
-                               (qp->sq.wqe_cnt << qp->sq.wqe_shift);
-               }
+               err = set_user_sq_size(dev, qp, &ucmd);
+               if (err)
+                       goto err;
 
                qp->umem =
-                       ib_umem_get(udata,
-                                   (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
-                                                             ucmd.wq.buf_addr,
-                                   qp->buf_size, 0, 0);
+                       ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
                if (IS_ERR(qp->umem)) {
                        err = PTR_ERR(qp->umem);
                        goto err;
@@ -1041,11 +1129,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        goto err_mtt;
 
                if (qp_has_rq(init_attr)) {
-                       err = mlx4_ib_db_map_user(udata,
-                                                 (src == MLX4_IB_QP_SRC) ?
-                                                         ucmd.qp.db_addr :
-                                                         ucmd.wq.db_addr,
-                                                 &qp->db);
+                       err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
                        if (err)
                                goto err_mtt;
                }
@@ -1115,10 +1199,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                                goto err_wrid;
                        }
                }
-       } else if (src == MLX4_IB_RWQ_SRC) {
-               err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
-               if (err)
-                       goto err_wrid;
        } else {
                /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
                 * otherwise, the WQE BlueFlame setup flow wrongly causes
@@ -1157,8 +1237,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
         */
        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-       qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
-                                                 mlx4_ib_wq_event;
+       qp->mqp.event = mlx4_ib_qp_event;
 
        if (!*caller_qp)
                *caller_qp = qp;
@@ -1186,8 +1265,6 @@ err_qpn:
        if (!sqpn) {
                if (qp->flags & MLX4_IB_QP_NETIF)
                        mlx4_ib_steer_qp_free(dev, qpn, 1);
-               else if (src == MLX4_IB_RWQ_SRC)
-                       mlx4_ib_release_wqn(context, qp, 0);
                else
                        mlx4_qp_release_range(dev->dev, qpn, 1);
        }
@@ -1518,8 +1595,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
                /* fall through */
        case IB_QPT_UD:
        {
-               err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-                                      init_attr, udata, 0, &qp);
+               err = create_qp_common(pd, init_attr, udata, 0, &qp);
                if (err) {
                        kfree(qp);
                        return ERR_PTR(err);
@@ -1549,8 +1625,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
                        sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
                }
 
-               err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-                                      init_attr, udata, sqpn, &qp);
+               err = create_qp_common(pd, init_attr, udata, sqpn, &qp);
                if (err)
                        return ERR_PTR(err);
 
@@ -4047,8 +4122,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
                                struct ib_wq_init_attr *init_attr,
                                struct ib_udata *udata)
 {
-       struct mlx4_ib_dev *dev;
-       struct ib_qp_init_attr ib_qp_init_attr;
+       struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+       struct ib_qp_init_attr ib_qp_init_attr = {};
        struct mlx4_ib_qp *qp;
        struct mlx4_ib_create_wq ucmd;
        int err, required_cmd_sz;
@@ -4073,14 +4148,13 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
        if (udata->outlen)
                return ERR_PTR(-EOPNOTSUPP);
 
-       dev = to_mdev(pd->device);
-
        if (init_attr->wq_type != IB_WQT_RQ) {
                pr_debug("unsupported wq type %d\n", init_attr->wq_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
 
-       if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) {
+       if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
+           !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
                pr_debug("unsupported create_flags %u\n",
                         init_attr->create_flags);
                return ERR_PTR(-EOPNOTSUPP);
@@ -4093,7 +4167,6 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
        qp->pri.vid = 0xFFFF;
        qp->alt.vid = 0xFFFF;
 
-       memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
        ib_qp_init_attr.qp_context = init_attr->wq_context;
        ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
        ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
@@ -4104,8 +4177,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
        if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
                ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
 
-       err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
-                              udata, 0, &qp);
+       err = create_rq(pd, &ib_qp_init_attr, udata, qp);
        if (err) {
                kfree(qp);
                return ERR_PTR(err);
index af5bbb35c0589364794ca131a1e741392cfe5931..59022b7441448f0b5eca7e5f406196e4cfe777d3 100644 (file)
@@ -233,6 +233,8 @@ static bool is_legacy_obj_event_num(u16 event_num)
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
        case MLX5_EVENT_TYPE_DCT_DRAINED:
        case MLX5_EVENT_TYPE_COMP:
+       case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+       case MLX5_EVENT_TYPE_XRQ_ERROR:
                return true;
        default:
                return false;
@@ -315,8 +317,10 @@ static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return eqe->data.qp_srq.type;
        case MLX5_EVENT_TYPE_CQ_ERROR:
+       case MLX5_EVENT_TYPE_XRQ_ERROR:
                return 0;
        case MLX5_EVENT_TYPE_DCT_DRAINED:
+       case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
                return MLX5_EVENT_QUEUE_TYPE_DCT;
        default:
                return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
@@ -542,6 +546,8 @@ static u64 devx_get_obj_id(const void *in)
                break;
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+       case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+       case MLX5_CMD_OP_MODIFY_XRQ:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
                                        MLX5_GET(arm_xrq_in, in, xrqn));
                break;
@@ -776,6 +782,14 @@ static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
                        return true;
                return false;
        }
+       case MLX5_CMD_OP_CREATE_PSV:
+       {
+               u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
+
+               if (num_psv == 1)
+                       return true;
+               return false;
+       }
        default:
                return false;
        }
@@ -810,6 +824,8 @@ static bool devx_is_obj_modify_cmd(const void *in)
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+       case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+       case MLX5_CMD_OP_MODIFY_XRQ:
                return true;
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        {
@@ -922,6 +938,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+       case MLX5_CMD_OP_QUERY_LAG:
                return true;
        default:
                return false;
@@ -1215,6 +1232,12 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
        case MLX5_CMD_OP_ALLOC_XRCD:
                MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
                break;
+       case MLX5_CMD_OP_CREATE_PSV:
+               MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
+                        MLX5_CMD_OP_DESTROY_PSV);
+               MLX5_SET(destroy_psv_in, din, psvn,
+                        MLX5_GET(create_psv_out, out, psv0_index));
+               break;
        default:
                /* The entry must match one of the devx_is_obj_create_cmd commands */
                WARN_ON(true);
@@ -2285,7 +2308,11 @@ static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                break;
+       case MLX5_EVENT_TYPE_XRQ_ERROR:
+               obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
+               break;
        case MLX5_EVENT_TYPE_DCT_DRAINED:
+       case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
                obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
                break;
        case MLX5_EVENT_TYPE_CQ_ERROR:
index 7d5c2763b691cd501ac6f300fc45b57ec572dd6b..1ec0e667110ee6d7c0aa0ec33ecdd88f25d1fc8f 100644 (file)
@@ -535,7 +535,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len     = 1;
        props->state            = IB_PORT_DOWN;
-       props->phys_state       = 3;
+       props->phys_state       = IB_PORT_PHYS_STATE_DISABLED;
 
        mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;
@@ -561,7 +561,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 
        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
-               props->phys_state = 5;
+               props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        }
 
        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
@@ -5318,11 +5318,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
        INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
 };
 
+static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
+{
+       return MLX5_ESWITCH_MANAGER(mdev) &&
+              mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
+                      MLX5_ESWITCH_OFFLOADS;
+}
+
 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
 {
+       int num_cnt_ports;
        int i;
 
-       for (i = 0; i < dev->num_ports; i++) {
+       num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+       for (i = 0; i < num_cnt_ports; i++) {
                if (dev->port[i].cnts.set_id_valid)
                        mlx5_core_dealloc_q_counter(dev->mdev,
                                                    dev->port[i].cnts.set_id);
@@ -5424,13 +5434,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
 
 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
 {
+       int num_cnt_ports;
        int err = 0;
        int i;
        bool is_shared;
 
        is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+       num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
 
-       for (i = 0; i < dev->num_ports; i++) {
+       for (i = 0; i < num_cnt_ports; i++) {
                err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
                if (err)
                        goto err_alloc;
@@ -5450,7 +5462,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
                }
                dev->port[i].cnts.set_id_valid = true;
        }
-
        return 0;
 
 err_alloc:
@@ -5458,25 +5469,50 @@ err_alloc:
        return err;
 }
 
+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+                                                  u8 port_num)
+{
+       return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
+                                                  &dev->port[port_num].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Returns counters id to use for device+port
+ * @dev:       Pointer to mlx5 IB device
+ * @port_num:  Zero based port number
+ *
+ * mlx5_ib_get_counters_id() returns the counters set id to use for the
+ * given device and port combination, in both the switchdev and
+ * non-switchdev modes of the parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
+{
+       const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+       return cnts->set_id;
+}
+
 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
                                                    u8 port_num)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct mlx5_ib_port *port = &dev->port[port_num - 1];
+       const struct mlx5_ib_counters *cnts;
+       bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
 
-       /* We support only per port stats */
-       if (port_num == 0)
+       if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
                return NULL;
 
-       return rdma_alloc_hw_stats_struct(port->cnts.names,
-                                         port->cnts.num_q_counters +
-                                         port->cnts.num_cong_counters +
-                                         port->cnts.num_ext_ppcnt_counters,
+       cnts = get_counters(dev, port_num - 1);
+
+       return rdma_alloc_hw_stats_struct(cnts->names,
+                                         cnts->num_q_counters +
+                                         cnts->num_cong_counters +
+                                         cnts->num_ext_ppcnt_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
-                                   struct mlx5_ib_port *port,
+                                   const struct mlx5_ib_counters *cnts,
                                    struct rdma_hw_stats *stats,
                                    u16 set_id)
 {
@@ -5493,8 +5529,8 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
        if (ret)
                goto free;
 
-       for (i = 0; i < port->cnts.num_q_counters; i++) {
-               val = *(__be32 *)(out + port->cnts.offsets[i]);
+       for (i = 0; i < cnts->num_q_counters; i++) {
+               val = *(__be32 *)(out + cnts->offsets[i]);
                stats->value[i] = (u64)be32_to_cpu(val);
        }
 
@@ -5504,10 +5540,10 @@ free:
 }
 
 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
-                                         struct mlx5_ib_port *port,
-                                         struct rdma_hw_stats *stats)
+                                           const struct mlx5_ib_counters *cnts,
+                                           struct rdma_hw_stats *stats)
 {
-       int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+       int offset = cnts->num_q_counters + cnts->num_cong_counters;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int ret, i;
        void *out;
@@ -5520,12 +5556,10 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
        if (ret)
                goto free;
 
-       for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
+       for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
                stats->value[i + offset] =
                        be64_to_cpup((__be64 *)(out +
-                                   port->cnts.offsets[i + offset]));
-       }
-
+                                   cnts->offsets[i + offset]));
 free:
        kvfree(out);
        return ret;
@@ -5536,7 +5570,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                                u8 port_num, int index)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct mlx5_ib_port *port = &dev->port[port_num - 1];
+       const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
        struct mlx5_core_dev *mdev;
        int ret, num_counters;
        u8 mdev_port_num;
@@ -5544,18 +5578,17 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
        if (!stats)
                return -EINVAL;
 
-       num_counters = port->cnts.num_q_counters +
-                      port->cnts.num_cong_counters +
-                      port->cnts.num_ext_ppcnt_counters;
+       num_counters = cnts->num_q_counters +
+                      cnts->num_cong_counters +
+                      cnts->num_ext_ppcnt_counters;
 
        /* q_counters are per IB device, query the master mdev */
-       ret = mlx5_ib_query_q_counters(dev->mdev, port, stats,
-                                      port->cnts.set_id);
+       ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
        if (ret)
                return ret;
 
        if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
-               ret =  mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
+               ret =  mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
                if (ret)
                        return ret;
        }
@@ -5572,10 +5605,10 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                }
                ret = mlx5_lag_query_cong_counters(dev->mdev,
                                                   stats->value +
-                                                  port->cnts.num_q_counters,
-                                                  port->cnts.num_cong_counters,
-                                                  port->cnts.offsets +
-                                                  port->cnts.num_q_counters);
+                                                  cnts->num_q_counters,
+                                                  cnts->num_cong_counters,
+                                                  cnts->offsets +
+                                                  cnts->num_q_counters);
 
                mlx5_ib_put_native_port_mdev(dev, port_num);
                if (ret)
@@ -5590,20 +5623,22 @@ static struct rdma_hw_stats *
 mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
 {
        struct mlx5_ib_dev *dev = to_mdev(counter->device);
-       struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+       const struct mlx5_ib_counters *cnts =
+               get_counters(dev, counter->port - 1);
 
        /* Q counters are in the beginning of all counters */
-       return rdma_alloc_hw_stats_struct(port->cnts.names,
-                                         port->cnts.num_q_counters,
+       return rdma_alloc_hw_stats_struct(cnts->names,
+                                         cnts->num_q_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
 static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
 {
        struct mlx5_ib_dev *dev = to_mdev(counter->device);
-       struct mlx5_ib_port *port = &dev->port[counter->port - 1];
+       const struct mlx5_ib_counters *cnts =
+               get_counters(dev, counter->port - 1);
 
-       return mlx5_ib_query_q_counters(dev->mdev, port,
+       return mlx5_ib_query_q_counters(dev->mdev, cnts,
                                        counter->stats, counter->id);
 }
 
@@ -5780,7 +5815,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
                mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }
 
-/* The mlx5_ib_multiport_mutex should be held when calling this function */
 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
                                      struct mlx5_ib_multiport_info *mpi)
 {
@@ -5790,6 +5824,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
        int err;
        int i;
 
+       lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
        mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
 
        spin_lock(&port->mp.mpi_lock);
@@ -5839,13 +5875,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
        ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
 }
 
-/* The mlx5_ib_multiport_mutex should be held when calling this function */
 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
                                    struct mlx5_ib_multiport_info *mpi)
 {
        u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
        int err;
 
+       lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
        spin_lock(&ibdev->port[port_num].mp.mpi_lock);
        if (ibdev->port[port_num].mp.mpi) {
                mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
@@ -6928,7 +6965,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->port = kcalloc(num_ports, sizeof(*dev->port),
                             GFP_KERNEL);
        if (!dev->port) {
-               ib_dealloc_device((struct ib_device *)dev);
+               ib_dealloc_device(&dev->ib_dev);
                return NULL;
        }
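
The mlx5_ib_bind_slave_port()/mlx5_ib_unbind_slave_port() hunks above replace the
"mlx5_ib_multiport_mutex should be held" comments with lockdep_assert_held(), so the
locking rule is checked at runtime on lockdep-enabled builds instead of only documented.
A minimal kernel-style sketch of the pattern, using an invented example_mutex:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* hypothetical lock, for illustration only */

static void must_hold_example_mutex(void)
{
	/*
	 * With CONFIG_PROVE_LOCKING this warns if the caller forgot to take
	 * the mutex; on non-lockdep builds it compiles away to nothing.
	 */
	lockdep_assert_held(&example_mutex);

	/* ... work that relies on example_mutex being held ... */
}

The same assertion works for any lock class lockdep tracks, not just mutexes.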
 
index f6a53455bf8bd7050814c60c5d070e6aade844de..cb41a7e6255a2f7e5b22cd7136b0624164da60f1 100644 (file)
@@ -1475,4 +1475,5 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
                        bool dyn_bfreg);
 
 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
 #endif /* MLX5_IB_H */
index 762038ab83e290b2860cb45b324bdf0f40e6e0b8..817c924e72897bdaf968ef39a9e3bc1dc0d7c8be 100644 (file)
@@ -1004,9 +1004,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
        u32 transport_caps;
        struct mlx5_base_av *av;
        unsigned ds, opcode;
-#if defined(DEBUG)
-       u32 ctrl_wqe_index, ctrl_qpn;
-#endif
        u32 qpn = qp->trans_qp.base.mqp.qpn;
 
        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
@@ -1022,27 +1019,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
                return -EFAULT;
        }
 
-#if defined(DEBUG)
-       ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
-                       MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
-                       MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
-       if (wqe_index != ctrl_wqe_index) {
-               mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
-                           wqe_index, qpn,
-                           ctrl_wqe_index);
-               return -EFAULT;
-       }
-
-       ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
-               MLX5_WQE_CTRL_QPN_SHIFT;
-       if (qpn != ctrl_qpn) {
-               mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
-                           wqe_index, qpn,
-                           ctrl_qpn);
-               return -EFAULT;
-       }
-#endif /* DEBUG */
-
        *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
        *wqe += sizeof(*ctrl);
 
index 379328b2598fd59f1de6674df0918f4d6d79a91c..e96dcdfd6c3a300696fe2083d5bf6dee6c7ed81e 100644 (file)
@@ -3386,19 +3386,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);
        struct mlx5_qp_context context = {};
-       struct mlx5_ib_port *mibport = NULL;
        struct mlx5_ib_qp_base *base;
        u32 set_id;
 
        if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
                return 0;
 
-       if (counter) {
+       if (counter)
                set_id = counter->id;
-       } else {
-               mibport = &dev->port[mqp->port - 1];
-               set_id = mibport->cnts.set_id;
-       }
+       else
+               set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
 
        base = &mqp->trans_qp.base;
        context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
@@ -3459,7 +3456,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_qp_context *context;
        struct mlx5_ib_pd *pd;
-       struct mlx5_ib_port *mibport = NULL;
        enum mlx5_qp_state mlx5_cur, mlx5_new;
        enum mlx5_qp_optpar optpar;
        u32 set_id = 0;
@@ -3624,11 +3620,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                if (qp->flags & MLX5_IB_QP_UNDERLAY)
                        port_num = 0;
 
-               mibport = &dev->port[port_num];
                if (ibqp->counter)
                        set_id = ibqp->counter->id;
                else
-                       set_id = mibport->cnts.set_id;
+                       set_id = mlx5_ib_get_counters_id(dev, port_num);
                context->qp_counter_set_usr_page |=
                        cpu_to_be32(set_id << 24);
        }
@@ -3817,6 +3812,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+               u16 set_id;
+
                required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
                if (!is_valid_mask(attr_mask, required, 0))
                        return -EINVAL;
@@ -3843,7 +3840,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                }
                MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
                MLX5_SET(dctc, dctc, port, attr->port_num);
-               MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+               set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+               MLX5_SET(dctc, dctc, counter_set_id, set_id);
 
        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
                struct mlx5_ib_modify_qp_resp resp = {};
@@ -6331,11 +6330,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
        }
 
        if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
+               u16 set_id;
+
+               set_id = mlx5_ib_get_counters_id(dev, 0);
                if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
                        MLX5_SET64(modify_rq_in, in, modify_bitmask,
                                   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
-                       MLX5_SET(rqc, rqc, counter_set_id,
-                                dev->port->cnts.set_id);
+                       MLX5_SET(rqc, rqc, counter_set_id, set_id);
                } else
                        dev_info_once(
                                &dev->ib_dev.dev,
index b0d0687c7a686571a0cd8aa043282f80dadc76c5..8fc3630a9d4c3742995f31b2d06584b8024a29cf 100644 (file)
@@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
        xa_lock(&table->array);
        srq = xa_load(&table->array, srqn);
        if (srq)
-               atomic_inc(&srq->common.refcount);
+               refcount_inc(&srq->common.refcount);
        xa_unlock(&table->array);
 
        return srq;
@@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
        if (err)
                return err;
 
-       atomic_set(&srq->common.refcount, 1);
+       refcount_set(&srq->common.refcount, 1);
        init_completion(&srq->common.free);
 
        err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
@@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb,
        xa_lock(&table->array);
        srq = xa_load(&table->array, srqn);
        if (srq)
-               atomic_inc(&srq->common.refcount);
+               refcount_inc(&srq->common.refcount);
        xa_unlock(&table->array);
 
        if (!srq)
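
The srq_cmd.c hunks above convert the SRQ reference count from the raw atomic_t helpers
to refcount_t, which warns and saturates on overflow/underflow instead of silently
wrapping. A minimal sketch of the refcount_t lifecycle, with an invented struct obj
standing in for mlx5_core_srq:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcount;
	/* ... payload ... */
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	refcount_set(&o->refcount, 1);		/* initial reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->refcount);		/* warns if the count was already 0 */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		kfree(o);			/* last reference dropped */
}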
index bccc1137810924780a60dedc100a784edac45a9a..e8267e59077226ff8758539eaa3c21a9575634f7 100644 (file)
@@ -163,10 +163,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
-               props->phys_state = 5;
+               props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                port_state = IB_PORT_DOWN;
-               props->phys_state = 3;
+               props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
index f97b3d65b30cc7e9bfb4d7303537d4f65b04b50d..5136b835e1ba0521050caf388d018125a0ad2347 100644 (file)
@@ -826,7 +826,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
        if (rc)
                goto out;
 
-       dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
+       dev->db_addr = out_params.dpi_addr;
        dev->db_phys_addr = out_params.dpi_phys_addr;
        dev->db_size = out_params.dpi_size;
        dev->dpi = out_params.dpi;
index a92ca22e5de147334feef511edd157a7824a16ff..0cfd849b13d62270cb1b250266fd52fbaf5c151d 100644 (file)
@@ -229,7 +229,7 @@ struct qedr_ucontext {
        struct ib_ucontext ibucontext;
        struct qedr_dev *dev;
        struct qedr_pd *pd;
-       u64 dpi_addr;
+       void __iomem *dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
        u16 dpi;
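
The two qedr hunks above keep the doorbell/DPI address typed as void __iomem * end to end
rather than storing it as a u64 and casting, so sparse can catch accidental plain
dereferences. A short sketch of the annotation in use; toy_dev and its register layout
are invented:

#include <linux/io.h>
#include <linux/types.h>

struct toy_dev {
	void __iomem *db_addr;	/* MMIO doorbell mapping, not ordinary memory */
};

static void toy_ring_doorbell(struct toy_dev *d, u32 val)
{
	/*
	 * MMIO must go through accessors such as writel(); sparse flags a
	 * direct *d->db_addr dereference because of the __iomem tag.
	 */
	writel(val, d->db_addr);
}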
index 27d90a84ea01c9b8d4b9b0c96ef47a390a8cd6ea..6f3ce86019b733827b24b043f527cc36ba5e1f80 100644 (file)
@@ -221,10 +221,10 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
        /* *attr being zeroed by the caller, avoid zeroing it here */
        if (rdma_port->port_state == QED_RDMA_PORT_UP) {
                attr->state = IB_PORT_ACTIVE;
-               attr->phys_state = 5;
+               attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                attr->state = IB_PORT_DOWN;
-               attr->phys_state = 3;
+               attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        attr->max_mtu = IB_MTU_4096;
        attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
@@ -2451,7 +2451,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        struct qedr_dev *dev = qp->dev;
        struct ib_qp_attr attr;
        int attr_mask = 0;
-       int rc = 0;
 
        DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
                 qp, qp->qp_type);
@@ -2496,7 +2495,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
                xa_erase_irq(&dev->qps, qp->qp_id);
                kfree(qp);
        }
-       return rc;
+       return 0;
 }
 
 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
index 27b6e664e59de7e5c1465ac2384c2b862a20a425..b0144229cf3b5364774187792bd452602b3b9449 100644 (file)
@@ -1789,7 +1789,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
 
 static int qib_close(struct inode *in, struct file *fp)
 {
-       int ret = 0;
        struct qib_filedata *fd;
        struct qib_ctxtdata *rcd;
        struct qib_devdata *dd;
@@ -1873,7 +1872,7 @@ static int qib_close(struct inode *in, struct file *fp)
 
 bail:
        kfree(fd);
-       return ret;
+       return 0;
 }
 
 static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
index eeb07b245ef9bb9c735841199b5c1a3e1a628ada..556b8e44a51c49e103b195d3619818b133c9bd7e 100644 (file)
@@ -194,7 +194,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
                        return ERR_CAST(dev_list);
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
-                       vf = pci_get_drvdata(to_pci_dev(dev));
+                       vf = dev_get_drvdata(dev);
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
@@ -356,13 +356,14 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
 
        if (!us_ibdev->ufdev->link_up) {
                props->state = IB_PORT_DOWN;
-               props->phys_state = 3;
+               props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        } else if (!us_ibdev->ufdev->inaddr) {
                props->state = IB_PORT_INIT;
-               props->phys_state = 4;
+               props->phys_state =
+                       IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
        } else {
                props->state = IB_PORT_ACTIVE;
-               props->phys_state = 5;
+               props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        }
 
        props->port_cap_flags = 0;
index ecf6e659c0dad87300630a946100047d143cde4d..fb07eed9e4028345a02aac983269241012de2fae 100644 (file)
  */
 #define RXE_UVERBS_ABI_VERSION         2
 
-#define RDMA_LINK_PHYS_STATE_LINK_UP   (5)
-#define RDMA_LINK_PHYS_STATE_DISABLED  (3)
-#define RDMA_LINK_PHYS_STATE_POLLING   (2)
-
 #define RXE_ROCE_V2_SPORT              (0xc000)
 
 static inline u32 rxe_crc32(struct rxe_dev *rxe,
index 1abed47ca22170b7f7c1d84ae1a6769415f24869..fe520738670066e096f977153927617ea90baf96 100644 (file)
@@ -154,7 +154,7 @@ enum rxe_port_param {
        RXE_PORT_ACTIVE_WIDTH           = IB_WIDTH_1X,
        RXE_PORT_ACTIVE_SPEED           = 1,
        RXE_PORT_PKEY_TBL_LEN           = 64,
-       RXE_PORT_PHYS_STATE             = 2,
+       RXE_PORT_PHYS_STATE             = IB_PORT_PHYS_STATE_POLLING,
        RXE_PORT_SUBNET_PREFIX          = 0xfe80000000000000ULL,
 };
 
index 4ebdfcf4d33e3800de9e7884f7869659735590cd..623129f27f5a150be4eb276de25553eab9f3e0dd 100644 (file)
@@ -69,11 +69,11 @@ static int rxe_query_port(struct ib_device *dev,
                              &attr->active_width);
 
        if (attr->state == IB_PORT_ACTIVE)
-               attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
+               attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        else if (dev_get_flags(rxe->ndev) & IFF_UP)
-               attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
+               attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
        else
-               attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;
+               attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 
        mutex_unlock(&rxe->usdev_lock);
 
index e7f3a2379d9d87858ddb83bd7abce843ed330a7a..03176a3d1e18b7117fcda6bc4e8ddada5c0cc1c6 100644 (file)
@@ -206,7 +206,8 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
        attr->gid_tbl_len = 1;
        attr->max_msg_sz = -1;
        attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-       attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3;
+       attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+               IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
        attr->pkey_tbl_len = 1;
        attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
        attr->state = sdev->state;
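
Several hunks in this set (mlx5, ocrdma, qedr, usnic, rxe and siw above) replace the
literal physical port state values 2, 3, 4 and 5 with the named IB_PORT_PHYS_STATE_*
constants from ib_verbs.h. A hedged sketch of a provider filling struct ib_port_attr
with them; the toy_* names are invented:

#include <rdma/ib_verbs.h>

struct toy_port {
	bool link_up;
	bool configured;
};

static void toy_fill_port_attr(const struct toy_port *p,
			       struct ib_port_attr *attr)
{
	if (p->link_up) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;		/* was the literal 5 */
	} else if (p->configured) {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;		/* was the literal 2 */
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;		/* was the literal 3 */
	}
}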
index 1a039f16d315422b969df3bbbb15c5843b6d8865..e25c70a56be654db4389aa799b61ed790d7b5c15 100644 (file)
@@ -1767,8 +1767,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
                goto out;
 
 retry:
-       ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
-                       0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
+       ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
+                                IB_POLL_WORKQUEUE);
        if (IS_ERR(ch->cq)) {
                ret = PTR_ERR(ch->cq);
                pr_err("failed to create CQ cqe= %d ret= %d\n",
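
The ib_srpt hunk above drops the hand-picked completion vector ("0 /* XXX: spread CQs */")
in favour of ib_alloc_cq_any(), which lets the core pick the completion vector itself.
A rough usage sketch with an invented per-connection context:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

struct conn {
	struct ib_cq *cq;	/* hypothetical per-connection context */
};

static int conn_create_cq(struct conn *c, struct ib_device *dev, int depth)
{
	/*
	 * Same arguments as ib_alloc_cq() minus the comp_vector; the core
	 * chooses the vector so callers no longer hard-code one.
	 */
	c->cq = ib_alloc_cq_any(dev, c, depth, IB_POLL_WORKQUEUE);
	if (IS_ERR(c->cq))
		return PTR_ERR(c->cq);
	return 0;
}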
index 8cdd7e66f8df5b69d75fe18cb5aafb8a0b1dc436..53d09620e215ae954547bb93bb0901374e5c9c79 100644 (file)
@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_CREATE_UMEM:
        case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
+       case MLX5_CMD_OP_MODIFY_XRQ:
+       case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
        MLX5_COMMAND_STR_CASE(CREATE_UMEM);
        MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
+       MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
+       MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
        default: return "unknown command opcode";
        }
 }
index 41f25ea2e8d9eefaaee7d2b14e668185bed78ff1..2df9aaa421c69bd2f2082f9f48c57a4d32bc6850 100644 (file)
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
                 */
                dma_rmb();
 
-               if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
-                       atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
-               else
-                       mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+               atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
                atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
 
                ++eq->cons_index;
@@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
        struct mlx5_eq_table *eqt = dev->priv.eq_table;
 
-       if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-               return -EINVAL;
-
        return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
 {
        struct mlx5_eq_table *eqt = dev->priv.eq_table;
 
-       if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-               return -EINVAL;
-
        return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
 }
 EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
index 1f3891fde2eb171be76294b45aefcfab57f9990e..81e03e493a0124d28c105067fa3f40a33fb22d15 100644 (file)
@@ -58,20 +58,9 @@ struct vport_addr {
        bool mc_promisc;
 };
 
-enum {
-       UC_ADDR_CHANGE = BIT(0),
-       MC_ADDR_CHANGE = BIT(1),
-       PROMISC_CHANGE = BIT(3),
-};
-
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
 
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-                           MC_ADDR_CHANGE | \
-                           PROMISC_CHANGE)
-
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 
        MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
 
-       if (events_mask & UC_ADDR_CHANGE)
+       if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_uc_address_change, 1);
-       if (events_mask & MC_ADDR_CHANGE)
+       if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_mc_address_change, 1);
-       if (events_mask & PROMISC_CHANGE)
+       if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
                MLX5_SET(nic_vport_context, nic_vport_ctx,
                         event_on_promisc_change, 1);
 
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
        return err;
 }
 
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+                                       MLX5_VPORT_MC_ADDR_CHANGE | \
+                                       MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+       int ret;
+
+       ret = esw_create_legacy_table(esw);
+       if (ret)
+               return ret;
+
+       mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+       return 0;
+}
+
 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
 {
        esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
        esw_destroy_legacy_vepa_table(esw);
 }
 
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+       struct esw_mc_addr *mc_promisc;
+
+       mlx5_eswitch_disable_pf_vf_vports(esw);
+
+       mc_promisc = &esw->mc_promisc;
+       if (mc_promisc->uplink_rule)
+               mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+       esw_destroy_legacy_table(esw);
+}
+
 /* E-Switch vport UC/MC lists management */
 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
                                 struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
        esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
                  vport->vport, mac);
 
-       if (vport->enabled_events & UC_ADDR_CHANGE) {
+       if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
                esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
                esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
        }
 
-       if (vport->enabled_events & MC_ADDR_CHANGE)
+       if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
                esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
 
-       if (vport->enabled_events & PROMISC_CHANGE) {
+       if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
                esw_update_vport_rx_mode(esw, vport);
                if (!IS_ERR_OR_NULL(vport->allmulti_rule))
                        esw_update_vport_mc_promisc(esw, vport);
        }
 
-       if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+       if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
                esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
 
        esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
        return err;
 }
 
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+       const struct mlx5_core_dev *dev = esw->dev;
+
+       switch (type) {
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_TASR;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_VPORT;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+       case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+               return MLX5_CAP_QOS(dev, esw_element_type) &
+                      ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+       }
+       return false;
+}
+
 /* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
 {
        u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        struct mlx5_core_dev *dev = esw->dev;
+       __be32 *attr;
        int err;
 
        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-               return 0;
+               return;
+
+       if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+               return;
 
        if (esw->qos.enabled)
-               return -EEXIST;
+               return;
+
+       MLX5_SET(scheduling_context, tsar_ctx, element_type,
+                SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+       attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+       *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
 
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
                                                 &esw->qos.root_tsar_id);
        if (err) {
                esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
-               return err;
+               return;
        }
 
        esw->qos.enabled = true;
-       return 0;
 }
 
 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1619,7 +1667,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
 }
 
 static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-                            int enable_events)
+                            enum mlx5_eswitch_vport_event enabled_events)
 {
        u16 vport_num = vport->vport;
 
@@ -1641,7 +1689,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
 
        /* Sync with current vport context */
-       vport->enabled_events = enable_events;
+       vport->enabled_events = enabled_events;
        vport->enabled = true;
 
        /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1818,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
+ * whichever of them are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+                                enum mlx5_eswitch_vport_event enabled_events)
 {
        struct mlx5_vport *vport;
+       int i;
+
+       /* Enable PF vport */
+       vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+       esw_enable_vport(esw, vport, enabled_events);
+
+       /* Enable ECPF vports */
+       if (mlx5_ecpf_vport_exists(esw->dev)) {
+               vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+               esw_enable_vport(esw, vport, enabled_events);
+       }
+
+       /* Enable VF vports */
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+               esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports
+ * that were previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+       struct mlx5_vport *vport;
+       int i;
+
+       mlx5_esw_for_all_vports_reverse(esw, i, vport)
+               esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
        int err;
-       int i, enabled_events;
 
        if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1871,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
        if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
                esw_warn(esw->dev, "engress ACL is not supported by FW\n");
 
+       esw_create_tsar(esw);
+
        esw->mode = mode;
 
        mlx5_lag_update(esw->dev);
 
        if (mode == MLX5_ESWITCH_LEGACY) {
-               err = esw_create_legacy_table(esw);
-               if (err)
-                       goto abort;
+               err = esw_legacy_enable(esw);
        } else {
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-               err = esw_offloads_init(esw);
+               err = esw_offloads_enable(esw);
        }
 
        if (err)
                goto abort;
 
-       err = esw_create_tsar(esw);
-       if (err)
-               esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
-       enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
-               UC_ADDR_CHANGE;
-
-       /* Enable PF vport */
-       vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-       esw_enable_vport(esw, vport, enabled_events);
-
-       /* Enable ECPF vports */
-       if (mlx5_ecpf_vport_exists(esw->dev)) {
-               vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-               esw_enable_vport(esw, vport, enabled_events);
-       }
-
-       /* Enable VF vports */
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
-               esw_enable_vport(esw, vport, enabled_events);
-
        mlx5_eswitch_event_handlers_register(esw);
 
        esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1909,7 @@ abort:
 
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
 {
-       struct esw_mc_addr *mc_promisc;
-       struct mlx5_vport *vport;
        int old_mode;
-       int i;
 
        if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
                return;
@@ -1859,21 +1918,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
                 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
                 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
-       mc_promisc = &esw->mc_promisc;
        mlx5_eswitch_event_handlers_unregister(esw);
 
-       mlx5_esw_for_all_vports(esw, i, vport)
-               esw_disable_vport(esw, vport);
-
-       if (mc_promisc && mc_promisc->uplink_rule)
-               mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
-       esw_destroy_tsar(esw);
-
        if (esw->mode == MLX5_ESWITCH_LEGACY)
-               esw_destroy_legacy_table(esw);
+               esw_legacy_disable(esw);
        else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-               esw_offloads_cleanup(esw);
+               esw_offloads_disable(esw);
+
+       esw_destroy_tsar(esw);
 
        old_mode = esw->mode;
        esw->mode = MLX5_ESWITCH_NONE;
index a38e8a3c7c9a3b71b6a51c9f440b25878508340f..d447e1e44d59dc0e21df4ba1a00277bf68683850 100644 (file)
@@ -101,6 +101,13 @@ struct mlx5_vport_info {
        bool                    trusted;
 };
 
+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+       MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+       MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+       MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
 struct mlx5_vport {
        struct mlx5_core_dev    *dev;
        int                     vport;
@@ -122,7 +129,7 @@ struct mlx5_vport {
        } qos;
 
        bool                    enabled;
-       u16                     enabled_events;
+       enum mlx5_eswitch_vport_event enabled_events;
 };
 
 enum offloads_fdb_flags {
@@ -207,8 +214,11 @@ enum {
 struct mlx5_eswitch {
        struct mlx5_core_dev    *dev;
        struct mlx5_nb          nb;
+       /* legacy data structures */
        struct mlx5_eswitch_fdb fdb_table;
        struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
+       struct esw_mc_addr mc_promisc;
+       /* end of legacy */
        struct workqueue_struct *work_queue;
        struct mlx5_vport       *vports;
        u32 flags;
@@ -218,7 +228,6 @@ struct mlx5_eswitch {
         * and async SRIOV admin state changes
         */
        struct mutex            state_lock;
-       struct esw_mc_addr      mc_promisc;
 
        struct {
                bool            enabled;
@@ -233,8 +242,8 @@ struct mlx5_eswitch {
        struct mlx5_esw_functions esw_funcs;
 };
 
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -513,6 +522,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
             (vport) = &(esw)->vports[i],               \
             (i) < (esw)->total_vports; (i)++)
 
+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+       for ((i) = (esw)->total_vports - 1;             \
+            (vport) = &(esw)->vports[i],               \
+            (i) >= MLX5_VPORT_PF; (i)--)
+
 #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)        \
        for ((i) = MLX5_VPORT_FIRST_VF;                 \
             (vport) = &(esw)->vports[(i)],             \
@@ -574,6 +588,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
 void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
 
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+                                enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
index 089ae4d48a82dda15e47c49530f9a4e513577acb..db01b8ee9385e40580cd11326816fb6f0c0c5d79 100644 (file)
@@ -587,38 +587,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
        mlx5_del_flow_rules(rule);
 }
 
-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
 {
        u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
        u8 fdb_to_vport_reg_c_id;
        int err;
 
-       err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
-                                                  out, sizeof(out));
-       if (err)
-               return err;
-
-       fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
-                                        esw_vport_context.fdb_to_vport_reg_c_id);
-
-       fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
-       MLX5_SET(modify_esw_vport_context_in, in,
-                esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
-       MLX5_SET(modify_esw_vport_context_in, in,
-                field_select.fdb_to_vport_reg_c_id, 1);
-
-       return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
-                                                    in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
-       u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
-       u8 fdb_to_vport_reg_c_id;
-       int err;
+       if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+               return 0;
 
        err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
                                                   out, sizeof(out));
@@ -628,7 +605,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
        fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
                                         esw_vport_context.fdb_to_vport_reg_c_id);
 
-       fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+       if (enable)
+               fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+       else
+               fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
 
        MLX5_SET(modify_esw_vport_context_in, in,
                 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -2124,7 +2104,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
        return NOTIFY_OK;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
        int err;
 
@@ -2138,11 +2118,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
        if (err)
                return err;
 
-       if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-               err = mlx5_eswitch_enable_passing_vport_metadata(esw);
-               if (err)
-                       goto err_vport_metadata;
-       }
+       err = esw_set_passing_vport_metadata(esw, true);
+       if (err)
+               goto err_vport_metadata;
+
+       mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
 
        err = esw_offloads_load_all_reps(esw);
        if (err)
@@ -2156,8 +2136,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
        return 0;
 
 err_reps:
-       if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-               mlx5_eswitch_disable_passing_vport_metadata(esw);
+       mlx5_eswitch_disable_pf_vf_vports(esw);
+       esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
        esw_offloads_steering_cleanup(esw);
        return err;
@@ -2182,13 +2162,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
        return err;
 }
 
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
        mlx5_rdma_disable_roce(esw->dev);
        esw_offloads_devcom_cleanup(esw);
        esw_offloads_unload_all_reps(esw);
-       if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-               mlx5_eswitch_disable_passing_vport_metadata(esw);
+       mlx5_eswitch_disable_pf_vf_vports(esw);
+       esw_set_passing_vport_metadata(esw, false);
        esw_offloads_steering_cleanup(esw);
        esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 }
index 7ac1249eadc33c7962d1da4e7250fd911060de0e..b84a225bbe86cfe0a50b85caa645eb5da516e3bc 100644 (file)
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+                          enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+                          u32 *id)
 {
        u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
        u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
 
        MLX5_SET(alloc_flow_counter_in, in, opcode,
                 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+       MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
 
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
        return err;
 }
 
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+       return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
 {
        u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
        return 0;
 }
 
-struct mlx5_cmd_fc_bulk {
-       u32 id;
-       int num;
-       int outlen;
-       u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
-       struct mlx5_cmd_fc_bulk *b;
-       int outlen =
-               MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-               MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
-       b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
-       if (!b)
-               return NULL;
-
-       b->id = id;
-       b->num = num;
-       b->outlen = outlen;
-
-       return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
 {
-       kfree(b);
+       return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+               MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
 }
 
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+                          u32 *out)
 {
+       int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
        u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
 
        MLX5_SET(query_flow_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
        MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-       MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
-       MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-       return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-                         struct mlx5_cmd_fc_bulk *b, u32 id,
-                         u64 *packets, u64 *bytes)
-{
-       int index = id - b->id;
-       void *stats;
-
-       if (index < 0 || index >= b->num) {
-               mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
-                              id, b->id, b->id + b->num - 1);
-               return;
-       }
-
-       stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
-                            flow_statistics[index]);
-       *packets = MLX5_GET64(traffic_counter, stats, packets);
-       *bytes = MLX5_GET64(traffic_counter, stats, octets);
+       MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+       MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
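The reworked flow-counter bulk interface drops the mlx5_cmd_fc_bulk wrapper object: callers now size a raw output buffer with mlx5_cmd_fc_get_bulk_query_out_len(), issue mlx5_cmd_fc_bulk_query() against a 4-aligned base id, and decode entries with MLX5_ADDR_OF()/MLX5_GET64(), as fs_counters.c does further down. A minimal caller sketch, assuming the smallest bulk of 4 counters (illustrative only, not part of this series):

static int example_query_one_counter(struct mlx5_core_dev *dev, u32 base_id,
                                     u64 *packets, u64 *bytes)
{
        int bulk_len = 4;       /* base_id must be 4-aligned */
        int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
        void *stats;
        u32 *out;
        int err;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
        if (!err) {
                /* entry i of the bulk sits at flow_statistics[i] */
                stats = MLX5_ADDR_OF(query_flow_counter_out, out,
                                     flow_statistics[0]);
                *packets = MLX5_GET64(traffic_counter, stats, packets);
                *bytes = MLX5_GET64(traffic_counter, stats, octets);
        }
        kvfree(out);
        return err;
}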
index e340f9af2f5a7525393a56a82062d6990f180361..bc4606306009843a3f9e6dba5e94d5f4b8c4d6e4 100644 (file)
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
 };
 
 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+                          enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+                          u32 *id);
 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
                      u64 *packets, u64 *bytes);
 
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-                         struct mlx5_cmd_fc_bulk *b, u32 id,
-                         u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+                          u32 *out);
 
 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
 
index 1834d9f3aa1c019ee9057b141bfe86068ac757c2..0c2a704dca4227cb79eeb58eacbf1fd8ea87c648 100644 (file)
@@ -75,7 +75,7 @@ struct mlx5_fc {
  * access to counter list:
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ *     mlx5_fc_stats_work(). addlist is a lockless single linked list
  *     that doesn't require any additional synchronization when adding single
  *     node.
  *   - spawn thread to do the actual destroy
@@ -136,72 +136,69 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
        spin_unlock(&fc_stats->counters_idr_lock);
 }
 
-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
-                                          struct mlx5_fc *first,
-                                          u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
 {
-       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-       struct mlx5_fc *counter = NULL;
-       struct mlx5_cmd_fc_bulk *b;
-       bool more = false;
-       u32 afirst_id;
-       int num;
-       int err;
+       return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+                         (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
 
-       int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
-                            (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+                                struct mlx5_fc_cache *cache)
+{
+       void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+                            flow_statistics[index]);
+       u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+       u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
 
-       /* first id must be aligned to 4 when using bulk query */
-       afirst_id = first->id & ~0x3;
+       if (cache->packets == packets)
+               return;
 
-       /* number of counters to query inc. the last counter */
-       num = ALIGN(last_id - afirst_id + 1, 4);
-       if (num > max_bulk) {
-               num = max_bulk;
-               last_id = afirst_id + num - 1;
-       }
+       cache->packets = packets;
+       cache->bytes = bytes;
+       cache->lastuse = jiffies;
+}
 
-       b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
-       if (!b) {
-               mlx5_core_err(dev, "Error allocating resources for bulk query\n");
-               return NULL;
-       }
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+                                             struct mlx5_fc *first,
+                                             u32 last_id)
+{
+       struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       bool query_more_counters = (first->id <= last_id);
+       int max_bulk_len = get_max_bulk_query_len(dev);
+       u32 *data = fc_stats->bulk_query_out;
+       struct mlx5_fc *counter = first;
+       u32 bulk_base_id;
+       int bulk_len;
+       int err;
 
-       err = mlx5_cmd_fc_bulk_query(dev, b);
-       if (err) {
-               mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
-               goto out;
-       }
+       while (query_more_counters) {
+               /* first id must be aligned to 4 when using bulk query */
+               bulk_base_id = counter->id & ~0x3;
 
-       counter = first;
-       list_for_each_entry_from(counter, &fc_stats->counters, list) {
-               struct mlx5_fc_cache *c = &counter->cache;
-               u64 packets;
-               u64 bytes;
+               /* number of counters to query inc. the last counter */
+               bulk_len = min_t(int, max_bulk_len,
+                                ALIGN(last_id - bulk_base_id + 1, 4));
 
-               if (counter->id > last_id) {
-                       more = true;
-                       break;
+               err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+                                            data);
+               if (err) {
+                       mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+                       return;
                }
+               query_more_counters = false;
 
-               mlx5_cmd_fc_bulk_get(dev, b,
-                                    counter->id, &packets, &bytes);
+               list_for_each_entry_from(counter, &fc_stats->counters, list) {
+                       int counter_index = counter->id - bulk_base_id;
+                       struct mlx5_fc_cache *cache = &counter->cache;
 
-               if (c->packets == packets)
-                       continue;
+                       if (counter->id >= bulk_base_id + bulk_len) {
+                               query_more_counters = true;
+                               break;
+                       }
 
-               c->packets = packets;
-               c->bytes = bytes;
-               c->lastuse = jiffies;
+                       update_counter_cache(counter_index, data, cache);
+               }
        }
-
-out:
-       mlx5_cmd_fc_bulk_free(b);
-
-       return more ? counter : NULL;
 }
 
 static void mlx5_free_fc(struct mlx5_core_dev *dev,
@@ -244,8 +241,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
 
        counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
                                   list);
-       while (counter)
-               counter = mlx5_fc_stats_query(dev, counter, last->id);
+       if (counter)
+               mlx5_fc_stats_query_counter_range(dev, counter, last->id);
 
        fc_stats->next_query = now + fc_stats->sampling_interval;
 }
@@ -324,6 +321,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
 int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 {
        struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+       int max_bulk_len;
+       int max_out_len;
 
        spin_lock_init(&fc_stats->counters_idr_lock);
        idr_init(&fc_stats->counters_idr);
@@ -331,14 +330,24 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
        init_llist_head(&fc_stats->addlist);
        init_llist_head(&fc_stats->dellist);
 
+       max_bulk_len = get_max_bulk_query_len(dev);
+       max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+       fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+       if (!fc_stats->bulk_query_out)
+               return -ENOMEM;
+
        fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
        if (!fc_stats->wq)
-               return -ENOMEM;
+               goto err_wq_create;
 
        fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
        INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
 
        return 0;
+
+err_wq_create:
+       kfree(fc_stats->bulk_query_out);
+       return -ENOMEM;
 }
 
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,6 +361,8 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
        destroy_workqueue(dev->priv.fc_stats.wq);
        dev->priv.fc_stats.wq = NULL;
 
+       kfree(fc_stats->bulk_query_out);
+
        idr_destroy(&fc_stats->counters_idr);
 
        tmplist = llist_del_all(&fc_stats->addlist);
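To make the bulk-query loop above concrete: with a max_bulk_len of 128, a first pending counter id of 102 and last_id of 128, bulk_base_id becomes 102 & ~0x3 = 100, bulk_len becomes min(128, ALIGN(128 - 100 + 1, 4)) = 32, and a single bulk query for ids 100..131 refreshes every cached counter in that range; only if some pending counter id falls at or beyond bulk_base_id + bulk_len does query_more_counters trigger another pass. (Worked example for illustration, not taken from the patch.)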
index b15b27a497fc3fe20b8a9cb9e1b435183fc17707..fa0e991f19835e433133853589c991ae9436f2f0 100644 (file)
@@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
        int err = 0;
 
-       if (cleanup)
+       if (cleanup) {
+               mlx5_unregister_device(dev);
                mlx5_drain_health_wq(dev);
+       }
 
        mutex_lock(&dev->intf_state_mutex);
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev)
 
        mlx5_crdump_disable(dev);
        mlx5_devlink_unregister(devlink);
-       mlx5_unregister_device(dev);
 
        if (mlx5_unload_one(dev, true)) {
                mlx5_core_err(dev, "mlx5_unload_one failed\n");
index b8ba74de95558f84c29b26c80fb1ccb30889b83f..c3aea4cc2fff859b7d0d39bdb94a9cad069f3e45 100644 (file)
@@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 
        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
-               atomic_inc(&common->refcount);
+               refcount_inc(&common->refcount);
 
        spin_unlock_irqrestore(&table->lock, flags);
 
@@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 
 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
 {
-       if (atomic_dec_and_test(&common->refcount))
+       if (refcount_dec_and_test(&common->refcount))
                complete(&common->free);
 }
 
@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
 
        common = mlx5_get_rsc(table, rsn);
        if (!common) {
-               mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+               mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
                return NOTIFY_OK;
        }
 
@@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
        if (err)
                return err;
 
-       atomic_set(&qp->common.refcount, 1);
+       refcount_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;
 
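The atomic_t to refcount_t conversion above keeps the get/put semantics one for one while gaining refcount_t's hardening (saturation instead of wrap, WARN on increment-from-zero and on underflow). A minimal sketch of the pattern being protected, mirroring mlx5_get_rsc()/mlx5_core_put_rsc() (illustrative only):

static struct mlx5_core_rsc_common *example_get(struct mlx5_core_rsc_common *c)
{
        refcount_inc(&c->refcount);     /* WARNs if the count was already 0 */
        return c;
}

static void example_put(struct mlx5_core_rsc_common *c)
{
        if (refcount_dec_and_test(&c->refcount))
                complete(&c->free);     /* wake the waiter freeing the resource */
}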
index bc86dffdc43c524574412b53345ea948acc2e446..01c380425f9d385b0901d70d2adbe25f695a8572 100644 (file)
@@ -188,8 +188,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
                /* new rate limit */
                err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
                if (err) {
-                       mlx5_core_err(dev, "Failed configuring rate limit(err %d): \
-                                     rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+                       mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
                                      err, rl->rate, rl->max_burst_sz,
                                      rl->typical_pkt_sz);
                        goto out;
@@ -218,8 +217,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
        mutex_lock(&table->rl_lock);
        entry = find_rl_entry(table, rl);
        if (!entry || !entry->refcount) {
-               mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \
-                              are not configured\n",
+               mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
                               rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
                goto out;
        }
index 158ac07389118278766ae3c9836a13d8f5307981..38b1f402f7ed299c3bbaa7090c9336ddfa7062fb 100644 (file)
@@ -798,9 +798,8 @@ static int qed_rdma_add_user(void *rdma_cxt,
        /* Calculate the corresponding DPI address */
        dpi_start_offset = p_hwfn->dpi_start_offset;
 
-       out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
-                                    dpi_start_offset +
-                                    ((out_params->dpi) * p_hwfn->dpi_size));
+       out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
+                              out_params->dpi * p_hwfn->dpi_size;
 
        out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
                                    dpi_start_offset +
index cd07e5301d42e79620f3ccebbe3d90266441befd..3c91fa97c9a852b9b0e724636fac95d1fdc66ec4 100644 (file)
@@ -1654,15 +1654,17 @@ static struct smbd_connection *_smbd_get_connection(
 
        info->send_cq = NULL;
        info->recv_cq = NULL;
-       info->send_cq = ib_alloc_cq(info->id->device, info,
-                       info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+       info->send_cq =
+               ib_alloc_cq_any(info->id->device, info,
+                               info->send_credit_target, IB_POLL_SOFTIRQ);
        if (IS_ERR(info->send_cq)) {
                info->send_cq = NULL;
                goto alloc_cq_failed;
        }
 
-       info->recv_cq = ib_alloc_cq(info->id->device, info,
-                       info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+       info->recv_cq =
+               ib_alloc_cq_any(info->id->device, info,
+                               info->receive_credit_max, IB_POLL_SOFTIRQ);
        if (IS_ERR(info->recv_cq)) {
                info->recv_cq = NULL;
                goto alloc_cq_failed;
index c38f0d46b267cb2d747eb9d26bf2599d03947be5..fc2aa4e2065811643b94399bb3eae1fa403b688c 100644 (file)
@@ -945,12 +945,6 @@ header-test-                       += net/xdp.h
 header-test-                   += net/xdp_priv.h
 header-test-                   += pcmcia/cistpl.h
 header-test-                   += pcmcia/ds.h
-header-test-                   += rdma/ib.h
-header-test-                   += rdma/iw_portmap.h
-header-test-                   += rdma/opa_port_info.h
-header-test-                   += rdma/rdmavt_cq.h
-header-test-                   += rdma/restrack.h
-header-test-                   += rdma/signature.h
 header-test-                   += rdma/tid_rdma_defs.h
 header-test-                   += scsi/fc/fc_encaps.h
 header-test-                   += scsi/fc/fc_fc2.h
index ce9839c8bc1a6ca539628f078fc8bf7471a84ce7..e427af260ebec12c6174a8545e505279877c5426 100644 (file)
@@ -328,6 +328,7 @@ enum mlx5_event {
        MLX5_EVENT_TYPE_GPIO_EVENT         = 0x15,
        MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
        MLX5_EVENT_TYPE_TEMP_WARN_EVENT    = 0x17,
+       MLX5_EVENT_TYPE_XRQ_ERROR          = 0x18,
        MLX5_EVENT_TYPE_REMOTE_CONFIG      = 0x19,
        MLX5_EVENT_TYPE_GENERAL_EVENT      = 0x22,
        MLX5_EVENT_TYPE_MONITOR_COUNTER    = 0x24,
@@ -345,6 +346,7 @@ enum mlx5_event {
        MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
 
        MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
+       MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,
 
        MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
        MLX5_EVENT_TYPE_FPGA_QP_ERROR      = 0x21,
@@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err {
        u8      syndrome;
 };
 
+struct mlx5_eqe_xrq_err {
+       __be32  reserved1[5];
+       __be32  type_xrqn;
+       __be32  reserved2;
+};
+
 struct mlx5_eqe_port_state {
        u8      reserved0[8];
        u8      port;
@@ -698,6 +706,7 @@ union ev_data {
        struct mlx5_eqe_pps             pps;
        struct mlx5_eqe_dct             dct;
        struct mlx5_eqe_temp_warning    temp_warning;
+       struct mlx5_eqe_xrq_err         xrq_err;
 } __packed;
 
 struct mlx5_eqe {
index 0e6da1840c7d84e7a2056cf80233f7104b64637b..0acd28f2e62c3d20540d798ef276ef021c2022de 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/interrupt.h>
 #include <linux/idr.h>
 #include <linux/notifier.h>
+#include <linux/refcount.h>
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
@@ -398,7 +399,7 @@ enum mlx5_res_type {
 
 struct mlx5_core_rsc_common {
        enum mlx5_res_type      res;
-       atomic_t                refcount;
+       refcount_t              refcount;
        struct completion       free;
 };
 
@@ -488,6 +489,7 @@ struct mlx5_fc_stats {
        struct delayed_work work;
        unsigned long next_query;
        unsigned long sampling_interval; /* jiffies */
+       u32 *bulk_query_out;
 };
 
 struct mlx5_events;
index ec571fd7fcf89299e4ef17b17142fce13059de6e..f648d1645287c390bfecf102449259135af222bc 100644 (file)
@@ -172,6 +172,8 @@ enum {
        MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY     = 0x725,
        MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY       = 0x726,
        MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS        = 0x727,
+       MLX5_CMD_OP_RELEASE_XRQ_ERROR             = 0x729,
+       MLX5_CMD_OP_MODIFY_XRQ                    = 0x72a,
        MLX5_CMD_OP_QUERY_ESW_FUNCTIONS           = 0x740,
        MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
        MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
@@ -1040,6 +1042,21 @@ enum {
        MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
 };
 
+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+       MLX5_FC_BULK_128   = (1 << 0),
+       MLX5_FC_BULK_256   = (1 << 1),
+       MLX5_FC_BULK_512   = (1 << 2),
+       MLX5_FC_BULK_1024  = (1 << 3),
+       MLX5_FC_BULK_2048  = (1 << 4),
+       MLX5_FC_BULK_4096  = (1 << 5),
+       MLX5_FC_BULK_8192  = (1 << 6),
+       MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
 struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x30];
        u8         vhca_id[0x10];
@@ -1244,7 +1261,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_2e0[0x7];
        u8         max_qp_mcg[0x19];
 
-       u8         reserved_at_300[0x18];
+       u8         reserved_at_300[0x10];
+       u8         flow_counter_bulk_alloc[0x8];
        u8         log_max_mcg[0x8];
 
        u8         reserved_at_320[0x3];
@@ -2766,7 +2784,7 @@ struct mlx5_ifc_traffic_counter_bits {
 struct mlx5_ifc_tisc_bits {
        u8         strict_lag_tx_port_affinity[0x1];
        u8         tls_en[0x1];
-       u8         reserved_at_1[0x2];
+       u8         reserved_at_2[0x2];
        u8         lag_tx_port_affinity[0x04];
 
        u8         reserved_at_8[0x4];
@@ -2941,6 +2959,13 @@ enum {
        SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
 };
 
+enum {
+       ELEMENT_TYPE_CAP_MASK_TASR              = 1 << 0,
+       ELEMENT_TYPE_CAP_MASK_VPORT             = 1 << 1,
+       ELEMENT_TYPE_CAP_MASK_VPORT_TC          = 1 << 2,
+       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC     = 1 << 3,
+};
+
 struct mlx5_ifc_scheduling_context_bits {
        u8         element_type[0x8];
        u8         reserved_at_8[0x18];
@@ -7817,7 +7842,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x38];
+       u8         flow_counter_bulk[0x8];
 };
 
 struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@@ -9570,8 +9596,6 @@ struct mlx5_ifc_query_lag_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
-
        struct mlx5_ifc_lagc_bits ctx;
 };
 
index 898f595ea3d63c2a9eafe976425e6bf685079ec3..74efca15fde7dd9841a6f4a92efea982ef16c3a3 100644 (file)
@@ -225,7 +225,7 @@ struct qed_rdma_start_in_params {
 
 struct qed_rdma_add_user_out_params {
        u16 dpi;
-       u64 dpi_addr;
+       void __iomem *dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
        u16 wid_count;
index 4f385ec54f80ce0a1afcbf009a42c2a1cb1285f3..fe2fc9e91588de93a1c58b55194b3d974a09d9da 100644 (file)
@@ -36,6 +36,8 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/cred.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
 
 struct ib_addr {
        union {
index c2b39dda44cc6fc0c07492ef8d28b674c95d825b..de5bc352f473d102efbdc3f4a0817d3a5ee56276 100644 (file)
@@ -98,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define ibdev_dbg(__dev, format, args...)                       \
        dynamic_ibdev_dbg(__dev, format, ##args)
-#elif defined(DEBUG)
-#define ibdev_dbg(__dev, format, args...)                       \
-       ibdev_printk(KERN_DEBUG, __dev, format, ##args)
 #else
 __printf(2, 3) __cold
 static inline
 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
 #endif
 
+#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)           \
+do {                                                                    \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+       if (__ratelimit(&_rs))                                          \
+               ibdev_level(ibdev, fmt, ##__VA_ARGS__);                 \
+} while (0)
+
+#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_err_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_info_ratelimited(ibdev, fmt, ...) \
+       ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define ibdev_dbg_ratelimited(ibdev, fmt, ...)                          \
+do {                                                                    \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))      \
+               __dynamic_ibdev_dbg(&descriptor, ibdev, fmt,            \
+                                   ##__VA_ARGS__);                     \
+} while (0)
+#else
+__printf(2, 3) __cold
+static inline
+void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
+#endif
+
 union ib_gid {
        u8      raw[16];
        struct {
@@ -451,6 +490,16 @@ enum ib_port_state {
        IB_PORT_ACTIVE_DEFER    = 5
 };
 
+enum ib_port_phys_state {
+       IB_PORT_PHYS_STATE_SLEEP = 1,
+       IB_PORT_PHYS_STATE_POLLING = 2,
+       IB_PORT_PHYS_STATE_DISABLED = 3,
+       IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
+       IB_PORT_PHYS_STATE_LINK_UP = 5,
+       IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
+       IB_PORT_PHYS_STATE_PHY_TEST = 7,
+};
+
 enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_2X     = 16,
@@ -3713,6 +3762,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
                                NULL);
 }
 
+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+                               int nr_cqe, enum ib_poll_context poll_ctx,
+                               const char *caller);
+
+/**
+ * ib_alloc_cq_any: Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @poll_ctx: Context used for polling the CQ
+ */
+static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
+                                           void *private, int nr_cqe,
+                                           enum ib_poll_context poll_ctx)
+{
+       return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
+                                KBUILD_MODNAME);
+}
+
 /**
  * ib_free_cq_user - Free kernel/user CQ
  * @cq: The CQ to free
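A brief usage sketch for the two additions to this header, in an arbitrary driver context (illustrative only, not part of this series): the _ratelimited wrappers take the same arguments as their plain counterparts, and ib_alloc_cq_any() is ib_alloc_cq() without an explicit completion vector.

static struct ib_cq *example_alloc_cq(struct ib_device *ibdev, void *priv,
                                      int nr_cqe)
{
        struct ib_cq *cq;

        cq = ib_alloc_cq_any(ibdev, priv, nr_cqe, IB_POLL_WORKQUEUE);
        if (IS_ERR(cq))
                ibdev_err_ratelimited(ibdev, "CQ allocation failed: %ld\n",
                                      PTR_ERR(cq));
        return cq;
}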
index b9fee7feeeb5a55aa8a2b85155038dd94e78fd6c..c89535047c42cd8b363786ae6a65bf37c771d79d 100644 (file)
@@ -33,6 +33,9 @@
 #ifndef _IW_PORTMAP_H
 #define _IW_PORTMAP_H
 
+#include <linux/socket.h>
+#include <linux/netlink.h>
+
 #define IWPM_ULIBNAME_SIZE     32
 #define IWPM_DEVNAME_SIZE      32
 #define IWPM_IFNAME_SIZE       16
index 7147a92630114d9168e76e0f8e50caf90dbee30d..bdbfe25d38548be3d112d60c2927ee74dd98a9e2 100644 (file)
@@ -33,6 +33,8 @@
 #if !defined(OPA_PORT_INFO_H)
 #define OPA_PORT_INFO_H
 
+#include <rdma/opa_smi.h>
+
 #define OPA_PORT_LINK_MODE_NOP 0               /* No change */
 #define OPA_PORT_LINK_MODE_OPA 4               /* Port mode is OPA */
 
index 6631624e4d7c542dd196387d82be263a7e61be4f..ab22759de7ea087adff0b5b570383f61eccd4133 100644 (file)
@@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 /**
  * Send the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @pid: Userspace netlink process ID
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid);
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid);
 
 /**
  * Send, with wait/1 retry, the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @pid: Userspace netlink process ID
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid);
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid);
 
 /**
  * Send the supplied skb to a netlink group.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @group: Netlink group ID
  * @flags: allocation flags
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+                     unsigned int group, gfp_t flags);
 
 /**
  * Check if there are any listeners to the netlink group
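The rdma_nl_* senders now take the target network namespace explicitly. A minimal sketch of replying to a request in the requester's namespace, assuming the usual netlink helpers sock_net() and NETLINK_CB() (illustrative only, not from this series):

static int example_reply(struct sk_buff *request, struct sk_buff *reply)
{
        return rdma_nl_unicast(sock_net(request->sk), reply,
                               NETLINK_CB(request).portid);
}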
index 04c519ef6d715b30adf603b2445180a9874325e2..574eb7278f468f797a9dbe10e14a2492c6d88ac7 100644 (file)
@@ -53,6 +53,7 @@
 
 #include <linux/kthread.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
 
 /*
  * Define an ib_cq_notify value that is not valid so we know when CQ
index f24cc2a1d3c5d961a9d284ac79d01798232b936c..d16b0fcc8344b32c3d2eb5466b35d860f7979e91 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef _RDMA_SIGNATURE_H_
 #define _RDMA_SIGNATURE_H_
 
+#include <linux/types.h>
+
 enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
index bac8dad5dd690a4b7eb371baeaf65fb9768aed23..b21c3c209815923d7196d4a525900d4c3fd2ba34 100644 (file)
@@ -685,9 +685,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
                goto error;
 
        /* Create the Completion Queue */
-       rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
-                       opts.sq_depth + opts.rq_depth + 1,
-                       0, IB_POLL_SOFTIRQ);
+       rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
+                                  opts.sq_depth + opts.rq_depth + 1,
+                                  IB_POLL_SOFTIRQ);
        if (IS_ERR(rdma->cq))
                goto error;
 
index 3fe665152d954a650cd01751cabbe548a0a24d0e..4d3db6ee7f09ca7eae3d4ac239e5dbac452851cc 100644 (file)
@@ -454,14 +454,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
-       newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
-                                       0, IB_POLL_WORKQUEUE);
+       newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
+                                           IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
-       newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
-                                       0, IB_POLL_WORKQUEUE);
+       newxprt->sc_rq_cq =
+               ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
index 805b1f35e1caae32664794814c863f4e216d236a..b10aa16557f00d104bae3b757e83adccbfe68228 100644 (file)
@@ -521,18 +521,17 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
        init_waitqueue_head(&ep->rep_connect_wait);
        ep->rep_receive_count = 0;
 
-       sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
-                            ep->rep_attr.cap.max_send_wr + 1,
-                            ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
-                            IB_POLL_WORKQUEUE);
+       sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+                                ep->rep_attr.cap.max_send_wr + 1,
+                                IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                goto out1;
        }
 
-       recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
-                            ep->rep_attr.cap.max_recv_wr + 1,
-                            0, IB_POLL_WORKQUEUE);
+       recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+                                ep->rep_attr.cap.max_recv_wr + 1,
+                                IB_POLL_WORKQUEUE);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                goto out2;