nvme: move reset workqueue handling to common code
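This moves controller reset handling out of the individual transports and into the core: resets now go through the shared nvme_wq workqueue (which also picks up the scan and async-event work previously sent to the system workqueue via schedule_work()), and the new nvme_reset_ctrl() helper, together with the core-internal nvme_reset_ctrl_sync(), replaces the ctrl->ops->reset_ctrl() indirection. As context, here is a minimal sketch of the resulting calling convention, assuming the core.c definitions in the diff below; the wrapper name nvme_example_trigger_reset is hypothetical and not part of the patch:

    /* Sketch only: how a controller reset is triggered after this change. */
    static int nvme_example_trigger_reset(struct nvme_ctrl *ctrl, bool wait)
    {
            /*
             * nvme_reset_ctrl() marks the controller NVME_CTRL_RESETTING and
             * queues ctrl->reset_work on the shared nvme_wq; callers used to
             * go through the per-transport ctrl->ops->reset_ctrl(ctrl) hook.
             */
            if (wait)
                    return nvme_reset_ctrl_sync(ctrl);  /* queue and flush the work */
            return nvme_reset_ctrl(ctrl);               /* asynchronous */
    }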
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 032cce3311e747a8696706946990e7550632eea3..f1b78cc2069552f4e596d54b7c5ce864bc64d89f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -45,7 +45,7 @@ module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 EXPORT_SYMBOL_GPL(nvme_io_timeout);
 
-unsigned char shutdown_timeout = 5;
+static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
 
@@ -65,11 +65,34 @@ static bool force_apst;
 module_param(force_apst, bool, 0644);
 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
 
+struct workqueue_struct *nvme_wq;
+EXPORT_SYMBOL_GPL(nvme_wq);
+
 static LIST_HEAD(nvme_ctrl_list);
 static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+       if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
+       if (!queue_work(nvme_wq, &ctrl->reset_work))
+               return -EBUSY;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
+
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+       int ret;
+
+       ret = nvme_reset_ctrl(ctrl);
+       if (!ret)
+               flush_work(&ctrl->reset_work);
+       return ret;
+}
+
 static blk_status_t nvme_error_status(struct request *req)
 {
        switch (nvme_req(req)->status & 0x7ff) {
@@ -157,7 +180,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_RECONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -323,12 +345,21 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        return BLK_STS_OK;
 }
 
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
-               struct nvme_command *cmnd)
+static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+               struct request *req, struct nvme_command *cmnd)
 {
        u16 control = 0;
        u32 dsmgmt = 0;
 
+       /*
+        * If formatted with metadata, require the block layer to provide a
+        * buffer unless this namespace is formatted such that the metadata
+        * can be stripped/generated by the controller with PRACT=1.
+        */
+       if (ns && ns->ms && (!ns->pi_type || ns->ms != 8) &&
+           !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
+               return BLK_STS_NOTSUPP;
+
        if (req->cmd_flags & REQ_FUA)
                control |= NVME_RW_FUA;
        if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -362,6 +393,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+       return BLK_STS_OK;
 }
 
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
@@ -390,7 +422,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                break;
        case REQ_OP_READ:
        case REQ_OP_WRITE:
-               nvme_setup_rw(ns, req, cmd);
+               ret = nvme_setup_rw(ns, req, cmd);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -592,7 +624,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
        if (nvme_keep_alive(ctrl)) {
                /* allocation failure, reset the controller */
                dev_err(ctrl->device, "keep-alive failed\n");
-               ctrl->ops->reset_ctrl(ctrl);
+               nvme_reset_ctrl_sync(ctrl);
                return;
        }
 }
@@ -636,6 +668,77 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
        return error;
 }
 
+static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
+{
+       struct nvme_command c = { };
+       int status;
+       void *data;
+       int pos;
+       int len;
+
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+       c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+
+       data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
+                                     NVME_IDENTIFY_DATA_SIZE);
+       if (status)
+               goto free_data;
+
+       for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+               struct nvme_ns_id_desc *cur = data + pos;
+
+               if (cur->nidl == 0)
+                       break;
+
+               switch (cur->nidt) {
+               case NVME_NIDT_EUI64:
+                       if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_EUI64_LEN;
+                       memcpy(ns->eui, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_NGUID:
+                       if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_NGUID_LEN;
+                       memcpy(ns->nguid, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_UUID:
+                       if (cur->nidl != NVME_NIDT_UUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_UUID_LEN;
+                       uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
+                       break;
+               default:
+                       /* Skip unknown types */
+                       len = cur->nidl;
+                       break;
+               }
+
+               len += sizeof(*cur);
+       }
+free_data:
+       kfree(data);
+       return status;
+}
+
 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
 {
        struct nvme_command c = { };
@@ -745,7 +848,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
         * access to the admin queue, as that might be only way to fix them up.
         */
        if (status > 0) {
-               dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+               dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
                *count = 0;
        } else {
                nr_io_queues = min(result & 0xffff, result >> 16) + 1;
@@ -1009,7 +1112,15 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
        if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
                memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
        if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
-               memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+               memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
+       if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
+               /* Don't treat an error as fatal, as we potentially
+                * already have an NGUID or EUI-64.
+                */
+               if (nvme_identify_ns_descs(ns, ns->ns_id))
+                       dev_warn(ns->ctrl->device,
+                                "%s: Identify Descriptors failed\n", __func__);
+       }
 
        return 0;
 }
@@ -1276,7 +1387,7 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
 
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 {
-       unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
+       unsigned long timeout = jiffies + (shutdown_timeout * HZ);
        u32 csts;
        int ret;
 
@@ -1575,7 +1686,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
 
        if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
-               dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
+               dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
                ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
        }
 
@@ -1603,7 +1714,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        prev_apsta = ctrl->apsta;
        if (ctrl->quirks & NVME_QUIRK_NO_APST) {
                if (force_apst && id->apsta) {
-                       dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
+                       dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
                        ctrl->apsta = 1;
                } else {
                        ctrl->apsta = 0;
@@ -1627,12 +1738,14 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                        ret = -EINVAL;
 
                if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
                }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
+               ctrl->hmpre = le32_to_cpu(id->hmpre);
+               ctrl->hmmin = le32_to_cpu(id->hmmin);
        }
 
        kfree(id);
@@ -1728,7 +1841,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
-               return ctrl->ops->reset_ctrl(ctrl);
+               return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
@@ -1754,7 +1867,7 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
        int ret;
 
-       ret = ctrl->ops->reset_ctrl(ctrl);
+       ret = nvme_reset_ctrl_sync(ctrl);
        if (ret < 0)
                return ret;
        return count;
@@ -1780,8 +1893,8 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
        int serial_len = sizeof(ctrl->serial);
        int model_len = sizeof(ctrl->model);
 
-       if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
-               return sprintf(buf, "eui.%16phN\n", ns->uuid);
+       if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+               return sprintf(buf, "eui.%16phN\n", ns->nguid);
 
        if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                return sprintf(buf, "eui.%8phN\n", ns->eui);
@@ -1796,11 +1909,28 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
 
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+       return sprintf(buf, "%pU\n", ns->nguid);
+}
+static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                                                                char *buf)
 {
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       return sprintf(buf, "%pU\n", ns->uuid);
+
+       /* For backward compatibility, expose the NGUID to userspace
+        * if we have no UUID set.
+        */
+       if (uuid_is_null(&ns->uuid)) {
+               printk_ratelimited(KERN_WARNING
+                                  "No UUID available, providing old NGUID\n");
+               return sprintf(buf, "%pU\n", ns->nguid);
+       }
+       return sprintf(buf, "%pU\n", &ns->uuid);
 }
 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
 
@@ -1823,6 +1953,7 @@ static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
 static struct attribute *nvme_ns_attrs[] = {
        &dev_attr_wwid.attr,
        &dev_attr_uuid.attr,
+       &dev_attr_nguid.attr,
        &dev_attr_eui.attr,
        &dev_attr_nsid.attr,
        NULL,
@@ -1835,7 +1966,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
        if (a == &dev_attr_uuid.attr) {
-               if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+               if (uuid_is_null(&ns->uuid) &&
+                   !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+                       return 0;
+       }
+       if (a == &dev_attr_nguid.attr) {
+               if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                        return 0;
        }
        if (a == &dev_attr_eui.attr) {
@@ -2049,7 +2185,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_nvm_ns_supported(ns, id) &&
                                nvme_nvm_register(ns, disk_name, node)) {
-               dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+               dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
                goto out_free_id;
        }
 
@@ -2224,7 +2360,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
         * removal.
         */
        if (ctrl->state == NVME_CTRL_LIVE)
-               schedule_work(&ctrl->scan_work);
+               queue_work(nvme_wq, &ctrl->scan_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
@@ -2279,7 +2415,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                /*FALLTHRU*/
        case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
-               schedule_work(&ctrl->async_event_work);
+               queue_work(nvme_wq, &ctrl->async_event_work);
                break;
        default:
                break;
@@ -2302,7 +2438,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
        ctrl->event_limit = NVME_NR_AERS;
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
 
@@ -2537,10 +2673,15 @@ int __init nvme_core_init(void)
 {
        int result;
 
+       nvme_wq = alloc_workqueue("nvme-wq",
+                       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+       if (!nvme_wq)
+               return -ENOMEM;
+
        result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
                                                        &nvme_dev_fops);
        if (result < 0)
-               return result;
+               goto destroy_wq;
        else if (result > 0)
                nvme_char_major = result;
 
@@ -2552,8 +2693,10 @@ int __init nvme_core_init(void)
 
        return 0;
 
- unregister_chrdev:
+unregister_chrdev:
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+destroy_wq:
+       destroy_workqueue(nvme_wq);
        return result;
 }
 
@@ -2561,6 +2704,7 @@ void nvme_core_exit(void)
 {
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+       destroy_workqueue(nvme_wq);
 }
 
 MODULE_LICENSE("GPL");