nvme: simplify resets
author Christoph Hellwig <hch@lst.de>
Thu, 26 Nov 2015 11:10:29 +0000 (12:10 +0100)
committer Jens Axboe <axboe@fb.com>
Tue, 22 Dec 2015 16:38:33 +0000 (09:38 -0700)
Don't delete the controller from dev_list before queuing a reset; instead,
just check in the polling kthread whether it is being reset.  This allows us
to remove the dev_list_lock in various places, and in addition we can simply
rely on the queue_work return value to tell whether a reset was actually
queued for a controller.
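
As a rough illustration of the pattern (the struct, helper names, and the
workqueue argument below are simplified stand-ins for this sketch, not the
driver's real code):

  #include <linux/workqueue.h>

  struct nvme_dev_sketch {
  	struct work_struct reset_work;
  };

  /*
   * queue_work() returns false if the work item is already pending, so its
   * return value alone tells us whether we won the race to start a reset --
   * no dev_list_lock or list_del_init() needed.
   */
  static int sketch_reset(struct workqueue_struct *wq,
  			struct nvme_dev_sketch *dev)
  {
  	if (!queue_work(wq, &dev->reset_work))
  		return -EBUSY;		/* reset already queued */

  	flush_work(&dev->reset_work);	/* wait for it to finish */
  	return 0;
  }

  /* The polling kthread skips devices whose reset is queued or running. */
  static bool sketch_reset_in_progress(struct nvme_dev_sketch *dev)
  {
  	return work_pending(&dev->reset_work) || work_busy(&dev->reset_work);
  }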

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/nvme/host/pci.c

index e683bd1a05e66850d756abb335d98355053c6606..febcef5ae0aad70d05f309156643cf2b613820c8 100644
@@ -77,7 +77,6 @@ struct nvme_dev;
 struct nvme_queue;
 struct nvme_iod;
 
-static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_iod *iod);
@@ -1093,13 +1092,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         * the admin queue.
         */
        if (!nvmeq->qid || cmd_rq->aborted) {
-               spin_lock_irq(&dev_list_lock);
-               if (!__nvme_reset(dev)) {
+               if (queue_work(nvme_workq, &dev->reset_work)) {
                        dev_warn(dev->dev,
                                 "I/O %d QID %d timeout, reset controller\n",
                                 req->tag, nvmeq->qid);
                }
-               spin_unlock_irq(&dev_list_lock);
                return BLK_EH_RESET_TIMER;
        }
 
@@ -1496,9 +1493,15 @@ static int nvme_kthread(void *data)
                        int i;
                        u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+                       /*
+                        * Skip controllers currently under reset.
+                        */
+                       if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
+                               continue;
+
                        if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
                                                        csts & NVME_CSTS_CFS) {
-                               if (!__nvme_reset(dev)) {
+                               if (queue_work(nvme_workq, &dev->reset_work)) {
                                        dev_warn(dev->dev,
                                                "Failed status: %x, reset controller\n",
                                                readl(dev->bar + NVME_REG_CSTS));
@@ -2228,33 +2231,17 @@ static void nvme_reset_work(struct work_struct *ws)
        schedule_work(&dev->probe_work);
 }
 
-static int __nvme_reset(struct nvme_dev *dev)
-{
-       if (work_pending(&dev->reset_work))
-               return -EBUSY;
-       list_del_init(&dev->node);
-       queue_work(nvme_workq, &dev->reset_work);
-       return 0;
-}
-
 static int nvme_reset(struct nvme_dev *dev)
 {
-       int ret;
-
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
 
-       spin_lock(&dev_list_lock);
-       ret = __nvme_reset(dev);
-       spin_unlock(&dev_list_lock);
-
-       if (!ret) {
-               flush_work(&dev->reset_work);
-               flush_work(&dev->probe_work);
-               return 0;
-       }
+       if (!queue_work(nvme_workq, &dev->reset_work))
+               return -EBUSY;
 
-       return ret;
+       flush_work(&dev->reset_work);
+       flush_work(&dev->probe_work);
+       return 0;
 }
 
 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)