block: Change the return type of blk_mq_map_queues() into void
author: Bart Van Assche <bvanassche@acm.org>
Mon, 15 Aug 2022 17:00:43 +0000 (10:00 -0700)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 22 Aug 2022 16:07:53 +0000 (10:07 -0600)
Since blk_mq_map_queues() and the .map_queues() callbacks always return 0,
change their return type into void. Most callers ignore the returned value
anyway.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Doug Gilbert <dgilbert@interlog.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: John Garry <john.garry@huawei.com>
Acked-by: Md Haris Iqbal <haris.iqbal@ionos.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Link: https://lore.kernel.org/r/20220815170043.19489-3-bvanassche@acm.org
[axboe: fold in fix from Bart]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
30 files changed:
block/blk-mq-cpumap.c
block/blk-mq-pci.c
block/blk-mq-rdma.c
block/blk-mq-virtio.c
block/blk-mq.c
drivers/block/null_blk/main.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/virtio_blk.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/virtio_scsi.c
drivers/ufs/core/ufshcd.c
include/linux/blk-mq-pci.h
include/linux/blk-mq-rdma.h
include/linux/blk-mq-virtio.h
include/linux/blk-mq.h
include/scsi/scsi_host.h

index 3db84d3197f111713035855ce6d761a9984c207d..9c2fce1a7b50eee5e63cb65b3cfe312b4d21ffdc 100644 (file)
@@ -32,7 +32,7 @@ static int get_first_sibling(unsigned int cpu)
        return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
        unsigned int *map = qmap->mq_map;
        unsigned int nr_queues = qmap->nr_queues;
@@ -70,8 +70,6 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
                                map[cpu] = map[first_sibling];
                }
        }
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
index b595a94c4d16bb93b5dc61c7880534c1ef5a528e..a90b88fd1332ce68582487643e305058f9403165 100644 (file)
@@ -23,8 +23,8 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-                           int offset)
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+                          int offset)
 {
        const struct cpumask *mask;
        unsigned int queue, cpu;
@@ -38,11 +38,10 @@ int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
 
-       return 0;
+       return;
 
 fallback:
        WARN_ON_ONCE(qmap->nr_queues > 1);
        blk_mq_clear_mq_map(qmap);
-       return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
index 14f968e58b8f7103bc81db1f42996d6853f70317..29c1f4d6eb0412308afbdac2317eeec0ac11e121 100644 (file)
@@ -21,7 +21,7 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
                struct ib_device *dev, int first_vec)
 {
        const struct cpumask *mask;
@@ -36,9 +36,9 @@ int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
                        map->mq_map[cpu] = map->queue_offset + queue;
        }
 
-       return 0;
+       return;
 
 fallback:
-       return blk_mq_map_queues(map);
+       blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
index 7b8a42c351026501dc8aceaf6f33d5a82a55e42f..6589f076a09635ca1ef3e68638cb7bd4708481ad 100644 (file)
@@ -21,7 +21,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                struct virtio_device *vdev, int first_vec)
 {
        const struct cpumask *mask;
@@ -39,8 +39,9 @@ int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
 
-       return 0;
+       return;
+
 fallback:
-       return blk_mq_map_queues(qmap);
+       blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
index 3c1e6b6d991d2de7ad831a4bc18eabef9448958b..4b90d2d8cfb0279fc6a1929dc58304c3b10c5965 100644 (file)
@@ -4190,7 +4190,7 @@ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
        return 0;
 }
 
-static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
        /*
         * blk_mq_map_queues() and multiple .map_queues() implementations
@@ -4220,10 +4220,10 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
                for (i = 0; i < set->nr_maps; i++)
                        blk_mq_clear_mq_map(&set->map[i]);
 
-               return set->ops->map_queues(set);
+               set->ops->map_queues(set);
        } else {
                BUG_ON(set->nr_maps > 1);
-               return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+               blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
        }
 }
 
@@ -4322,9 +4322,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
        }
 
-       ret = blk_mq_update_queue_map(set);
-       if (ret)
-               goto out_free_mq_map;
+       blk_mq_update_queue_map(set);
 
        ret = blk_mq_alloc_set_map_and_rqs(set);
        if (ret)
index 535059209693b27c774b661b66b7b6294c3ac4b5..1f154f92f4c276bc355ed7f61717289e1e950079 100644 (file)
@@ -1528,7 +1528,7 @@ static bool should_requeue_request(struct request *rq)
        return false;
 }
 
-static int null_map_queues(struct blk_mq_tag_set *set)
+static void null_map_queues(struct blk_mq_tag_set *set)
 {
        struct nullb *nullb = set->driver_data;
        int i, qoff;
@@ -1579,8 +1579,6 @@ static int null_map_queues(struct blk_mq_tag_set *set)
                qoff += map->nr_queues;
                blk_mq_map_queues(map);
        }
-
-       return 0;
 }
 
 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
index 04da33a22ef4f5c4c1151a459b78db09d431272b..9d01e7ab33e4b081fde6b9c73a45cd5741671031 100644 (file)
@@ -1165,7 +1165,7 @@ static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
        return cnt;
 }
 
-static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
+static void rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
 {
        struct rnbd_clt_session *sess = set->driver_data;
 
@@ -1194,8 +1194,6 @@ static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
                        set->map[HCTX_TYPE_DEFAULT].nr_queues,
                        set->map[HCTX_TYPE_READ].nr_queues);
        }
-
-       return 0;
 }
 
 static struct blk_mq_ops rnbd_mq_ops = {
index 30255fcaf18121c5051ba156e821ee0fd33ba4aa..23c5a1239520c85f83a152f475577b006a5b8a95 100644 (file)
@@ -802,7 +802,7 @@ static const struct attribute_group *virtblk_attr_groups[] = {
        NULL,
 };
 
-static int virtblk_map_queues(struct blk_mq_tag_set *set)
+static void virtblk_map_queues(struct blk_mq_tag_set *set)
 {
        struct virtio_blk *vblk = set->driver_data;
        int i, qoff;
@@ -827,8 +827,6 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
                else
                        blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
        }
-
-       return 0;
 }
 
 static void virtblk_complete_batch(struct io_comp_batch *iob)
index 127abaf9ba5d694c4c5fd92d9510dcdd789b1e2a..42767fb754552f5f1b25e726278a19e03e582504 100644 (file)
@@ -2860,7 +2860,7 @@ nvme_fc_complete_rq(struct request *rq)
        nvme_fc_ctrl_put(ctrl);
 }
 
-static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_fc_ctrl *ctrl = set->driver_data;
        int i;
@@ -2880,7 +2880,6 @@ static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
                else
                        blk_mq_map_queues(map);
        }
-       return 0;
 }
 
 static const struct blk_mq_ops nvme_fc_mq_ops = {
index 3a1c37f32f30d93992556937e7ed356bd617cbfa..4a8cfb360d316980c1246bc01e7fce347818d126 100644 (file)
@@ -450,7 +450,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
        return 0;
 }
 
-static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_dev *dev = set->driver_data;
        int i, qoff, offset;
@@ -477,8 +477,6 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
                qoff += map->nr_queues;
                offset += map->nr_queues;
        }
-
-       return 0;
 }
 
 /*
index 3100643be2993c9ff07820daee9ca82635f8dd69..ba08851e42c31c4d44decde9edd3684bebd7874e 100644 (file)
@@ -2188,7 +2188,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
        nvme_complete_rq(rq);
 }
 
-static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_rdma_ctrl *ctrl = set->driver_data;
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
@@ -2231,8 +2231,6 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
                ctrl->io_queues[HCTX_TYPE_DEFAULT],
                ctrl->io_queues[HCTX_TYPE_READ],
                ctrl->io_queues[HCTX_TYPE_POLL]);
-
-       return 0;
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
index 044da18c06f51249bb917ccb32aae23b2c87d1c8..ef151c23d49512805ee8d3808ebd6b1ae56b98e4 100644 (file)
@@ -2471,7 +2471,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_STS_OK;
 }
 
-static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
        struct nvme_tcp_ctrl *ctrl = set->driver_data;
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
@@ -2512,8 +2512,6 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
                ctrl->io_queues[HCTX_TYPE_DEFAULT],
                ctrl->io_queues[HCTX_TYPE_READ],
                ctrl->io_queues[HCTX_TYPE_POLL]);
-
-       return 0;
 }
 
 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
index 70e401fd432a04d46b73ed84ae8c29451044a1f1..c370272761625aedad017fedcf47d9d4ab11d30f 100644 (file)
@@ -3537,7 +3537,7 @@ static struct attribute *host_v2_hw_attrs[] = {
 
 ATTRIBUTE_GROUPS(host_v2_hw);
 
-static int map_queues_v2_hw(struct Scsi_Host *shost)
+static void map_queues_v2_hw(struct Scsi_Host *shost)
 {
        struct hisi_hba *hisi_hba = shost_priv(shost);
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
@@ -3552,9 +3552,6 @@ static int map_queues_v2_hw(struct Scsi_Host *shost)
                for_each_cpu(cpu, mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
-
-       return 0;
-
 }
 
 static struct scsi_host_template sht_v2_hw = {
index efe8c5be58702c2cc806db9519e5003c96629a3a..d716e5632d0f20e607b2533838e39457c6949d1d 100644 (file)
@@ -3171,13 +3171,12 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
        return 0;
 }
 
-static int hisi_sas_map_queues(struct Scsi_Host *shost)
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
 {
        struct hisi_hba *hisi_hba = shost_priv(shost);
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-       return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
-                                    BASE_VECTORS_V3_HW);
+       blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
 }
 
 static struct scsi_host_template sht_v3_hw = {
index a3e117a4b8e746981daa7315b716e6e0dc2c8a4e..f17813b1ffae8ea36af275ddac3795df2abbbe3c 100644 (file)
@@ -3174,7 +3174,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
        return 0;
 }
 
-static int megasas_map_queues(struct Scsi_Host *shost)
+static void megasas_map_queues(struct Scsi_Host *shost)
 {
        struct megasas_instance *instance;
        int qoff = 0, offset;
@@ -3183,7 +3183,7 @@ static int megasas_map_queues(struct Scsi_Host *shost)
        instance = (struct megasas_instance *)shost->hostdata;
 
        if (shost->nr_hw_queues == 1)
-               return 0;
+               return;
 
        offset = instance->low_latency_index_start;
 
@@ -3209,8 +3209,6 @@ static int megasas_map_queues(struct Scsi_Host *shost)
                map->queue_offset = qoff;
                blk_mq_map_queues(map);
        }
-
-       return 0;
 }
 
 static void megasas_aen_polling(struct work_struct *work);
index bfa1165e23b67ddb232108ce711d6d0694be6293..9681c8bf24edb839382965bcc853c635583fa61c 100644 (file)
@@ -3464,7 +3464,7 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
  *
  * Return: return zero.
  */
-static int mpi3mr_map_queues(struct Scsi_Host *shost)
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
 {
        struct mpi3mr_ioc *mrioc = shost_priv(shost);
        int i, qoff, offset;
@@ -3500,9 +3500,6 @@ static int mpi3mr_map_queues(struct Scsi_Host *shost)
                qoff += map->nr_queues;
                offset += map->nr_queues;
        }
-
-       return 0;
-
 }
 
 /**
index def37a7e59807dcc1cb0472a5f3fb95dc5290f82..44618bf66d9bcfaf6fc98ee2839754a86d0de560 100644 (file)
@@ -11872,7 +11872,7 @@ out:
  * scsih_map_queues - map reply queues with request queues
  * @shost: SCSI host pointer
  */
-static int scsih_map_queues(struct Scsi_Host *shost)
+static void scsih_map_queues(struct Scsi_Host *shost)
 {
        struct MPT3SAS_ADAPTER *ioc =
            (struct MPT3SAS_ADAPTER *)shost->hostdata;
@@ -11882,7 +11882,7 @@ static int scsih_map_queues(struct Scsi_Host *shost)
        int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
 
        if (shost->nr_hw_queues == 1)
-               return 0;
+               return;
 
        for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
                map = &shost->tag_set.map[i];
@@ -11910,7 +11910,6 @@ static int scsih_map_queues(struct Scsi_Host *shost)
 
                qoff += map->nr_queues;
        }
-       return 0;
 }
 
 /* shost template for SAS 2.0 HBA devices */
index a0028e130a7e45aecc057a7bc2f85963d3ea465e..2ff2fac1e403d4099fe9e90fa488d8f85eed3ce7 100644 (file)
@@ -81,7 +81,7 @@ LIST_HEAD(hba_list);
 
 struct workqueue_struct *pm8001_wq;
 
-static int pm8001_map_queues(struct Scsi_Host *shost)
+static void pm8001_map_queues(struct Scsi_Host *shost)
 {
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
index 7450c3458be7e0f541e8a4d11e02222ddc1c5d54..02fdeb0d31ec4afed59a923a75670a8a1dbf6ef3 100644 (file)
@@ -684,12 +684,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
                struct blk_mq_queue_map *map)
 {
        struct scsi_qla_host *vha = lport->private;
-       int rc;
 
-       rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
-       if (rc)
-               ql_log(ql_log_warn, vha, 0x21de,
-                      "pci map queue failed 0x%x", rc);
+       blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
 }
 
 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
index 0bd0fd1042dfe24ce86ac8addb7dc2f2cc866b7a..87a93892deac003f5374d38cf0d99011cb9f4675 100644 (file)
@@ -350,7 +350,7 @@ MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
 
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
@@ -7994,17 +7994,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 }
 
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
 {
-       int rc;
        scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
        if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
-               rc = blk_mq_map_queues(qmap);
+               blk_mq_map_queues(qmap);
        else
-               rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
-       return rc;
+               blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
 }
 
 struct scsi_host_template qla2xxx_driver_template = {
index b8a76b89f85a3ce9db6320fbe1ff26c67a7d958c..697fc57bc711fb0e8492234c5cf65ac90d80b9c9 100644 (file)
@@ -7474,12 +7474,12 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        return check_condition_result;
 }
 
-static int sdebug_map_queues(struct Scsi_Host *shost)
+static void sdebug_map_queues(struct Scsi_Host *shost)
 {
        int i, qoff;
 
        if (shost->nr_hw_queues == 1)
-               return 0;
+               return;
 
        for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
                struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -7501,9 +7501,6 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
 
                qoff += map->nr_queues;
        }
-
-       return 0;
-
 }
 
 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
index 4dbd29ab1dcc37792688849a468e5b8e5dc72ede..677f632d6fd321aa6617323572a0f5507cf61a5c 100644 (file)
@@ -1849,13 +1849,13 @@ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
        return 0;
 }
 
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
 {
        struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
 
        if (shost->hostt->map_queues)
                return shost->hostt->map_queues(shost);
-       return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+       blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
index 7a8c2c75acbaf31bd8036929e12f106c474c7013..b971fbe3b3a173db160702cdcb07095b073bf96e 100644 (file)
@@ -6436,12 +6436,12 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
        return 0;
 }
 
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
 {
        struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-                                       ctrl_info->pci_dev, 0);
+       blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+                             ctrl_info->pci_dev, 0);
 }
 
 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
index 578c4b6d0f7d97b1caddb46d8e0baef6f0b21822..077a8e24bd28f6fc1b17d08b384ad292065d62c3 100644 (file)
@@ -711,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
        return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
 {
        struct virtio_scsi *vscsi = shost_priv(shost);
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-       return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+       blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
 
 static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
index 6bc679d22927998eacbd779d69d4cfa9a9f9b6c5..f27a812a44161be4705bb8c98ff874c6528cbe3b 100644 (file)
@@ -2701,9 +2701,9 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
  * Associate the UFS controller queue with the default and poll HCTX types.
  * Initialize the mq_map[] arrays.
  */
-static int ufshcd_map_queues(struct Scsi_Host *shost)
+static void ufshcd_map_queues(struct Scsi_Host *shost)
 {
-       int i, ret;
+       int i;
 
        for (i = 0; i < shost->nr_maps; i++) {
                struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -2720,11 +2720,8 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
                        WARN_ON_ONCE(true);
                }
                map->queue_offset = 0;
-               ret = blk_mq_map_queues(map);
-               WARN_ON_ONCE(ret);
+               blk_mq_map_queues(map);
        }
-
-       return 0;
 }
 
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
index 0b1f45c62623891ac1743a3f8ded7a3da12a80b1..ca544e1d3508f34ab6e198b0bb17efe88de4d14d 100644 (file)
@@ -5,7 +5,7 @@
 struct blk_mq_queue_map;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-                         int offset);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+                          int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */
index 5cc5f0f36218fefabf979bd0bf13005e0a7817af..53b58c610e7671eb37a9925819c890f19677747a 100644 (file)
@@ -5,7 +5,7 @@
 struct blk_mq_tag_set;
 struct ib_device;
 
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
                struct ib_device *dev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_RDMA_H */
index 687ae287e1dc2c447add315f1d1abe8e90e4153b..13226e9b22dd53e4289d506d49c52671de036ee8 100644 (file)
@@ -5,7 +5,7 @@
 struct blk_mq_queue_map;
 struct virtio_device;
 
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                struct virtio_device *vdev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_VIRTIO_H */
index 92294a5fb083612e578532362160c468d0e435c8..c38575209d512fa53fa9a25f758804294a590b74 100644 (file)
@@ -630,7 +630,7 @@ struct blk_mq_ops {
         * @map_queues: This allows drivers specify their own queue mapping by
         * overriding the setup-time function that builds the mq_map.
         */
-       int (*map_queues)(struct blk_mq_tag_set *set);
+       void (*map_queues)(struct blk_mq_tag_set *set);
 
 #ifdef CONFIG_BLK_DEBUG_FS
        /**
@@ -880,7 +880,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
index aa7b7496c93aa16a6c1487425ec1de6dc862923b..7d51af3e7c40b75b3cdd90131453ae84c8748f28 100644 (file)
@@ -276,7 +276,7 @@ struct scsi_host_template {
         *
         * Status: OPTIONAL
         */
-       int (* map_queues)(struct Scsi_Host *shost);
+       void (* map_queues)(struct Scsi_Host *shost);
 
        /*
         * SCSI interface of blk_poll - poll for IO completions.