RDMA/efa: Expose minimum SQ size
authorGal Pressman <galpress@amazon.com>
Wed, 22 Jul 2020 14:03:10 +0000 (17:03 +0300)
committerJason Gunthorpe <jgg@nvidia.com>
Wed, 29 Jul 2020 12:23:39 +0000 (09:23 -0300)
The device reports the minimum SQ size required for creation.

Query the min SQ size and report it back to the userspace
library.

Link: https://lore.kernel.org/r/20200722140312.3651-3-galpress@amazon.com
Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Shadi Ammouri <sammouri@amazon.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
drivers/infiniband/hw/efa/efa_com_cmd.c
drivers/infiniband/hw/efa/efa_com_cmd.h
drivers/infiniband/hw/efa/efa_verbs.c
include/uapi/rdma/efa-abi.h

index 03e7388af06edc7e046f1df87c37a02bfa3e513e..5484b08bbc5da390c98584871a34b1f3aac98ab8 100644 (file)
@@ -606,8 +606,8 @@ struct efa_admin_feature_queue_attr_desc {
        /* Number of sub-CQs to be created for each CQ */
        u16 sub_cqs_per_cq;
 
-       /* MBZ */
-       u16 reserved;
+       /* Minimum number of WQEs per SQ */
+       u16 min_sq_depth;
 
        /* Maximum number of SGEs (buffers) allowed for a single send WQE */
        u16 max_wr_send_sges;
index 53cfde5c43d898ce029ad2ed1ebfd049a2f52885..6ac23627f65ad29993ea4cef879163f8f0cddbb3 100644 (file)
@@ -481,6 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
        result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
        result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
        result->max_tx_batch = resp.u.queue_attr.max_tx_batch;
+       result->min_sq_depth = resp.u.queue_attr.min_sq_depth;
 
        err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
        if (err) {
index 8df2a26d57d464bf011efd1d6e7bd8028d597678..190bac23f5857453df380c73332e6a01aba66d73 100644 (file)
@@ -128,6 +128,7 @@ struct efa_com_get_device_attr_result {
        u16 max_rq_sge;
        u16 max_wr_rdma_sge;
        u16 max_tx_batch;
+       u16 min_sq_depth;
        u8 db_bar;
 };
 
index f49d14cebe4a478397438e9006a6a34962eef9db..26102ab333b27354d6fbbc71a41e41f5c037a5aa 100644 (file)
@@ -1526,6 +1526,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
        resp.inline_buf_size = dev->dev_attr.inline_buf_size;
        resp.max_llq_size = dev->dev_attr.max_llq_size;
        resp.max_tx_batch = dev->dev_attr.max_tx_batch;
+       resp.min_sq_wr = dev->dev_attr.min_sq_depth;
 
        if (udata && udata->outlen) {
                err = ib_copy_to_udata(udata, &resp,
index 10781763da37e0031c165eeabf26c0b15645ad05..7ef2306f8dd49b01e390b41827ab72aac0a5cb84 100644 (file)
@@ -32,7 +32,8 @@ struct efa_ibv_alloc_ucontext_resp {
        __u16 inline_buf_size;
        __u32 max_llq_size; /* bytes */
        __u16 max_tx_batch; /* units of 64 bytes */
-       __u8 reserved_90[6];
+       __u16 min_sq_wr;
+       __u8 reserved_a0[4];
 };
 
 struct efa_ibv_alloc_pd_resp {