Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index e68f7d226bc9ff9482a8842c7e32dc5ba598236e..bb10fa4bb4f6eca2107d66eb639cefc6f532d07f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -229,7 +229,10 @@ struct vhost_scsi_ctx {
        struct iov_iter out_iter;
 };
 
-/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
+/*
+ * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
+ * configfs management operations.
+ */
 static DEFINE_MUTEX(vhost_scsi_mutex);
 static LIST_HEAD(vhost_scsi_list);
 
@@ -291,11 +294,6 @@ static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
        return 1;
 }
 
-static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
-{
-       return 0;
-}
-
 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
@@ -320,11 +318,6 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
        return tpg->tv_fabric_prot_type;
 }
 
-static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
-       return 1;
-}
-
 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
        struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
@@ -372,11 +365,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
        }
 }
 
-static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
-{
-       return 0;
-}
-
 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
        /* Go ahead and process the write immediately */
@@ -384,16 +372,6 @@ static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
        return 0;
 }
 
-static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
-{
-       return;
-}
-
-static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
-{
-       return 0;
-}
-
 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
        transport_generic_free_cmd(se_cmd, 0);
@@ -1526,7 +1504,7 @@ out:
  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  *  The lock nesting rule is:
- *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *    vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
@@ -1540,7 +1518,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
        int index, ret, i, len;
        bool match = false;
 
-       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
 
        /* Verify that ring has been setup correctly. */
@@ -1561,6 +1538,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
 
+       mutex_lock(&vhost_scsi_mutex);
        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
@@ -1576,6 +1554,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                mutex_unlock(&tpg->tv_tpg_mutex);
+                               mutex_unlock(&vhost_scsi_mutex);
                                ret = -EEXIST;
                                goto undepend;
                        }
@@ -1590,6 +1569,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        if (ret) {
                                pr_warn("target_depend_item() failed: %d\n", ret);
                                mutex_unlock(&tpg->tv_tpg_mutex);
+                               mutex_unlock(&vhost_scsi_mutex);
                                goto undepend;
                        }
                        tpg->tv_tpg_vhost_count++;
@@ -1599,6 +1579,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
        }
+       mutex_unlock(&vhost_scsi_mutex);
 
        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -1654,7 +1635,6 @@ undepend:
        kfree(vs_tpg);
 out:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1670,7 +1650,6 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        int index, ret, i;
        u8 target;
 
-       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1691,11 +1670,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                if (!tpg)
                        continue;
 
-               mutex_lock(&tpg->tv_tpg_mutex);
                tv_tport = tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
-                       goto err_tpg;
+                       goto err_dev;
                }
 
                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -1704,35 +1682,51 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                                tv_tport->tport_name, tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
-                       goto err_tpg;
+                       goto err_dev;
                }
+               match = true;
+       }
+       if (!match)
+               goto free_vs_tpg;
+
+       /* Prevent new cmds from starting and accessing the tpgs/sessions */
+       for (i = 0; i < vs->dev.nvqs; i++) {
+               vq = &vs->vqs[i].vq;
+               mutex_lock(&vq->mutex);
+               vhost_vq_set_backend(vq, NULL);
+               mutex_unlock(&vq->mutex);
+       }
+       /* Make sure cmds are not running before tearing them down. */
+       vhost_scsi_flush(vs);
+
+       for (i = 0; i < vs->dev.nvqs; i++) {
+               vq = &vs->vqs[i].vq;
+               vhost_scsi_destroy_vq_cmds(vq);
+       }
+
+       /*
+        * We can now release our hold on the tpg and sessions and userspace
+        * can free them after this point.
+        */
+       for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+               target = i;
+               tpg = vs->vs_tpg[target];
+               if (!tpg)
+                       continue;
+
+               mutex_lock(&tpg->tv_tpg_mutex);
+
                tpg->tv_tpg_vhost_count--;
                tpg->vhost_scsi = NULL;
                vs->vs_tpg[target] = NULL;
-               match = true;
+
                mutex_unlock(&tpg->tv_tpg_mutex);
-               /*
-                * Release se_tpg->tpg_group.cg_item configfs dependency now
-                * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
-                */
+
                se_tpg = &tpg->se_tpg;
                target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
-       if (match) {
-               for (i = 0; i < vs->dev.nvqs; i++) {
-                       vq = &vs->vqs[i].vq;
-                       mutex_lock(&vq->mutex);
-                       vhost_vq_set_backend(vq, NULL);
-                       mutex_unlock(&vq->mutex);
-               }
-               /* Make sure cmds are not running before tearing them down. */
-               vhost_scsi_flush(vs);
 
-               for (i = 0; i < vs->dev.nvqs; i++) {
-                       vq = &vs->vqs[i].vq;
-                       vhost_scsi_destroy_vq_cmds(vq);
-               }
-       }
+free_vs_tpg:
        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
@@ -1742,14 +1736,10 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&vhost_scsi_mutex);
        return 0;
 
-err_tpg:
-       mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1990,8 +1980,6 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
        if (!vs)
                return;
 
-       mutex_lock(&vs->dev.mutex);
-
        if (plug)
                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
        else
@@ -1999,11 +1987,18 @@ vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
 
        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
+       /*
+        * We can't queue events if the backend has been cleared, because
+        * we could end up queueing an event after the flush.
+        */
+       if (!vhost_vq_get_backend(vq))
+               goto unlock;
+
        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
                vhost_scsi_send_evt(vs, tpg, lun,
                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+unlock:
        mutex_unlock(&vq->mutex);
-       mutex_unlock(&vs->dev.mutex);
 }
 
 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
@@ -2022,15 +2017,10 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&vhost_scsi_mutex);
-
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count++;
-       mutex_unlock(&tpg->tv_tpg_mutex);
-
        vhost_scsi_hotplug(tpg, lun);
-
-       mutex_unlock(&vhost_scsi_mutex);
+       mutex_unlock(&tpg->tv_tpg_mutex);
 
        return 0;
 }
@@ -2041,15 +2031,10 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
        struct vhost_scsi_tpg *tpg = container_of(se_tpg,
                                struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&vhost_scsi_mutex);
-
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count--;
-       mutex_unlock(&tpg->tv_tpg_mutex);
-
        vhost_scsi_hotunplug(tpg, lun);
-
-       mutex_unlock(&vhost_scsi_mutex);
+       mutex_unlock(&tpg->tv_tpg_mutex);
 }
 
 static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
@@ -2435,17 +2420,11 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
        .tpg_get_tag                    = vhost_scsi_get_tpgt,
        .tpg_check_demo_mode            = vhost_scsi_check_true,
        .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
-       .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
-       .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
        .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
-       .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
        .release_cmd                    = vhost_scsi_release_cmd,
        .check_stop_free                = vhost_scsi_check_stop_free,
-       .sess_get_index                 = vhost_scsi_sess_get_index,
        .sess_get_initiator_sid         = NULL,
        .write_pending                  = vhost_scsi_write_pending,
-       .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
-       .get_cmd_state                  = vhost_scsi_get_cmd_state,
        .queue_data_in                  = vhost_scsi_queue_data_in,
        .queue_status                   = vhost_scsi_queue_status,
        .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,