nvme: properly handle partially initialized queues in nvme_create_io_queues
[linux-2.6-block.git] / drivers / block / nvme-core.c
index bf35846558c8e349e8c7bc72089c08a0ed74cf23..01a6d1b2d7e5be553f21523ab168d9f0f7e1a77e 100644 (file)
@@ -84,9 +84,10 @@ static wait_queue_head_t nvme_kthread_wait;
 
 static struct class *nvme_class;
 
-static void nvme_reset_failed_dev(struct work_struct *ws);
+static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
+static void nvme_dead_ctrl(struct nvme_dev *dev);
 
 struct async_cmd_info {
        struct kthread_work work;
@@ -1277,17 +1278,13 @@ static void nvme_abort_req(struct request *req)
        struct nvme_command cmd;
 
        if (!nvmeq->qid || cmd_rq->aborted) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dev_list_lock, flags);
-               if (work_busy(&dev->reset_work))
-                       goto out;
-               list_del_init(&dev->node);
-               dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
-                                                       req->tag, nvmeq->qid);
-               queue_work(nvme_workq, &dev->reset_work);
- out:
-               spin_unlock_irqrestore(&dev_list_lock, flags);
+               spin_lock(&dev_list_lock);
+               if (!__nvme_reset(dev)) {
+                       dev_warn(dev->dev,
+                                "I/O %d QID %d timeout, reset controller\n",
+                                req->tag, nvmeq->qid);
+               }
+               spin_unlock(&dev_list_lock);
                return;
        }
 
@@ -2082,13 +2079,11 @@ static int nvme_kthread(void *data)
 
                        if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
                                                        csts & NVME_CSTS_CFS) {
-                               if (work_busy(&dev->reset_work))
-                                       continue;
-                               list_del_init(&dev->node);
-                               dev_warn(dev->dev,
-                                       "Failed status: %x, reset controller\n",
-                                       readl(&dev->bar->csts));
-                               queue_work(nvme_workq, &dev->reset_work);
+                               if (!__nvme_reset(dev)) {
+                                       dev_warn(dev->dev,
+                                               "Failed status: %x, reset controller\n",
+                                               readl(&dev->bar->csts));
+                               }
                                continue;
                        }
                        for (i = 0; i < dev->queue_count; i++) {
@@ -2194,6 +2189,13 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
        kfree(ns);
 }
 
+/*
+ * Create I/O queues.  Failing to create an I/O queue is not an issue,
+ * we can continue with less than the desired amount of queues, and
+ * even a controller without I/O queues can still be used to issue
+ * admin commands.  This might be useful to upgrade a buggy firmware
+ * for example.
+ */
 static void nvme_create_io_queues(struct nvme_dev *dev)
 {
        unsigned i;
@@ -2203,8 +2205,10 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
                        break;
 
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
-               if (nvme_create_queue(dev->queues[i], i))
+               if (nvme_create_queue(dev->queues[i], i)) {
+                       nvme_free_queues(dev, i);
                        break;
+               }
 }
 
 static int set_queue_count(struct nvme_dev *dev, int count)
@@ -2955,14 +2959,15 @@ static const struct file_operations nvme_dev_fops = {
        .compat_ioctl   = nvme_dev_ioctl,
 };
 
-static int nvme_dev_start(struct nvme_dev *dev)
+static void nvme_probe_work(struct work_struct *work)
 {
-       int result;
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
        bool start_thread = false;
+       int result;
 
        result = nvme_dev_map(dev);
        if (result)
-               return result;
+               goto out;
 
        result = nvme_configure_admin_queue(dev);
        if (result)
@@ -2997,7 +3002,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
                goto free_tags;
 
        dev->event_limit = 1;
-       return result;
+
+       /*
+        * Keep the controller around but remove all namespaces if we don't have
+        * any working I/O queue.
+        */
+       if (dev->online_queues < 2) {
+               dev_warn(dev->dev, "IO queues not created\n");
+               nvme_dev_remove(dev);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_dev_add(dev);
+       }
+
+       return;
 
  free_tags:
        nvme_dev_remove_admin(dev);
@@ -3009,7 +3027,9 @@ static int nvme_dev_start(struct nvme_dev *dev)
        nvme_dev_list_remove(dev);
  unmap:
        nvme_dev_unmap(dev);
-       return result;
+ out:
+       if (!work_busy(&dev->reset_work))
+               nvme_dead_ctrl(dev);
 }
 
 static int nvme_remove_dead_ctrl(void *arg)
@@ -3023,24 +3043,6 @@ static int nvme_remove_dead_ctrl(void *arg)
        return 0;
 }
 
-static int nvme_dev_resume(struct nvme_dev *dev)
-{
-       int ret;
-
-       ret = nvme_dev_start(dev);
-       if (ret)
-               return ret;
-       if (dev->online_queues < 2) {
-               dev_warn(dev->dev, "IO queues not created\n");
-               nvme_free_queues(dev, 1);
-               nvme_dev_remove(dev);
-       } else {
-               nvme_unfreeze_queues(dev);
-               nvme_dev_add(dev);
-       }
-       return 0;
-}
-
 static void nvme_dead_ctrl(struct nvme_dev *dev)
 {
        dev_warn(dev->dev, "Device failed to resume\n");
@@ -3053,8 +3055,9 @@ static void nvme_dead_ctrl(struct nvme_dev *dev)
        }
 }
 
-static void nvme_dev_reset(struct nvme_dev *dev)
+static void nvme_reset_work(struct work_struct *ws)
 {
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
        bool in_probe = work_busy(&dev->probe_work);
 
        nvme_dev_shutdown(dev);
@@ -3074,24 +3077,24 @@ static void nvme_dev_reset(struct nvme_dev *dev)
        schedule_work(&dev->probe_work);
 }
 
-static void nvme_reset_failed_dev(struct work_struct *ws)
+static int __nvme_reset(struct nvme_dev *dev)
 {
-       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-       nvme_dev_reset(dev);
+       if (work_pending(&dev->reset_work))
+               return -EBUSY;
+       list_del_init(&dev->node);
+       queue_work(nvme_workq, &dev->reset_work);
+       return 0;
 }
 
 static int nvme_reset(struct nvme_dev *dev)
 {
-       int ret = -EBUSY;
+       int ret;
 
        if (!dev->admin_q || blk_queue_dying(dev->admin_q))
                return -ENODEV;
 
        spin_lock(&dev_list_lock);
-       if (!work_pending(&dev->reset_work)) {
-               queue_work(nvme_workq, &dev->reset_work);
-               ret = 0;
-       }
+       ret = __nvme_reset(dev);
        spin_unlock(&dev_list_lock);
 
        if (!ret) {
@@ -3118,7 +3121,6 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
 }
 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 
-static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -3141,7 +3143,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto free;
 
        INIT_LIST_HEAD(&dev->namespaces);
-       INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
+       INIT_WORK(&dev->reset_work, nvme_reset_work);
        dev->dev = get_device(&pdev->dev);
        pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
@@ -3169,7 +3171,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        INIT_LIST_HEAD(&dev->node);
        INIT_WORK(&dev->scan_work, nvme_dev_scan);
-       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       INIT_WORK(&dev->probe_work, nvme_probe_work);
        schedule_work(&dev->probe_work);
        return 0;
 
@@ -3189,14 +3191,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
-static void nvme_async_probe(struct work_struct *work)
-{
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-
-       if (nvme_dev_resume(dev) && !work_busy(&dev->reset_work))
-               nvme_dead_ctrl(dev);
-}
-
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);