nvme: use a single NVME_AQ_DEPTH and relax it to 32
author: Sagi Grimberg <sagi@grimberg.me>
Sun, 18 Jun 2017 13:15:59 +0000 (16:15 +0300)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 28 Jun 2017 14:14:13 +0000 (08:14 -0600)
No need to differentiate fabrics from pci/loop, also lower
it to 32 as we don't really need 256 inflight admin commands.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/discovery.c
drivers/nvme/target/loop.c
drivers/nvme/target/rdma.c
include/linux/nvme.h

index 7ca2d4d70aece2ba0a2c631682ab026ee70d502a..a59a243b81c6b339ba5a95001c146165d1648df1 100644 (file)
@@ -392,13 +392,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        cmd.connect.opcode = nvme_fabrics_command;
        cmd.connect.fctype = nvme_fabrics_type_connect;
        cmd.connect.qid = 0;
-
-       /*
-        * fabrics spec sets a minimum of depth 32 for admin queue,
-        * so set the queue with this depth always until
-        * justification otherwise.
-        */
-       cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+       cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
 
        /*
         * Set keep-alive timeout in seconds granularity (ms * 1000)
index 5165007e86a618d55390990d7462b560a047471b..5d5ecefd8dbee3f383aa8d6ec7f3304208c05b02 100644 (file)
@@ -36,7 +36,7 @@
  */
 #define NVME_FC_NR_AEN_COMMANDS        1
 #define NVME_FC_AQ_BLKMQ_DEPTH \
-       (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+       (NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
 #define AEN_CMDID_BASE         (NVME_FC_AQ_BLKMQ_DEPTH + 1)
 
 enum nvme_fc_queue_flags {
index 2a9ee769ce9e47dc117bfd70f570c776de13ee26..32a98e2740adf04cae518b88f1eeb00fc8c94206 100644 (file)
@@ -36,7 +36,6 @@
 #include "nvme.h"
 
 #define NVME_Q_DEPTH           1024
-#define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 
index 01dc723e6acf823d4eb540f2b61034492f798671..bc0322bf7d2753f117aef4f624cf9481859f3c29 100644 (file)
@@ -48,7 +48,7 @@
  */
 #define NVME_RDMA_NR_AEN_COMMANDS      1
 #define NVME_RDMA_AQ_BLKMQ_DEPTH       \
-       (NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
+       (NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
 
 struct nvme_rdma_device {
        struct ib_device       *dev;
@@ -719,7 +719,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ret)
                goto requeue;
 
-       ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+       ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
        if (ret)
                goto requeue;
 
@@ -1291,8 +1291,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
         * specified by the Fabrics standard.
         */
        if (priv.qid == 0) {
-               priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
-               priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+               priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
+               priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
        } else {
                /*
                 * current interpretation of the fabrics spec
@@ -1530,7 +1530,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
        int error;
 
-       error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+       error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
        if (error)
                return error;
 
index c7a90384dd75357debe46e507b9843e2f25ba2b4..8f3b57b4c97bd1f05f84f258845a7f43cf2332f6 100644 (file)
@@ -53,7 +53,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
        e->portid = port->disc_addr.portid;
        /* we support only dynamic controllers */
        e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
-       e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
+       e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
        e->subtype = type;
        memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
        memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
index f67606523724fbd4fc449657051b56caba11629e..86c09e2a149002bd32d7dca5de6b3dc821e573f9 100644 (file)
@@ -21,8 +21,6 @@
 #include "../host/nvme.h"
 #include "../host/fabrics.h"
 
-#define NVME_LOOP_AQ_DEPTH             256
-
 #define NVME_LOOP_MAX_SEGMENTS         256
 
 /*
@@ -31,7 +29,7 @@
  */
 #define NVME_LOOP_NR_AEN_COMMANDS      1
 #define NVME_LOOP_AQ_BLKMQ_DEPTH       \
-       (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
+       (NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
 
 struct nvme_loop_iod {
        struct nvme_request     nvme_req;
index 9e45cde633767be2176770c7ebd71a90b3ca3094..32aa10b521c8ac96f60ca6f3e859d3feceab7021 100644 (file)
@@ -1027,7 +1027,7 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
        queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
        queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
-       if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
+       if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
                return NVME_RDMA_CM_INVALID_HSQSIZE;
 
        /* XXX: Should we enforce some kind of max for IO queues? */
index f516a975bb219cb0a3220dd7cf11f35c83dddfb5..6b8ee9e628e1890e616b4708c0ceed22a903df86 100644 (file)
@@ -87,7 +87,7 @@ enum {
        NVMF_RDMA_CMS_RDMA_CM   = 1, /* Sockets based endpoint addressing */
 };
 
-#define NVMF_AQ_DEPTH          32
+#define NVME_AQ_DEPTH          32
 
 enum {
        NVME_REG_CAP    = 0x0000,       /* Controller Capabilities */