// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"
#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
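
/*
 * Note: each preallocated scatterlist entry maps at most one page, so
 * VHOST_SCSI_PREALLOC_SGLS bounds a single command's data payload to
 * roughly 2048 pages (about 8 MiB with 4 KiB pages).
 */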
/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};
struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec *tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	struct list_head tmf_queue;
};
struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};
struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};
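
/*
 * Fixed virtqueue layout mandated by the virtio-scsi spec: vq 0 is the
 * control queue, vq 1 the event queue, and vq 2 onwards carry SCSI
 * requests.
 */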
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_IO_VQ	1024
#define VHOST_SCSI_MAX_EVENT	128
static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};
struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	unsigned long *compl_bitmap;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};
/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};
/* Global mutex to protect the vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}
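
/*
 * Each command holds a reference on the inflight counter that was current
 * when it was submitted; vhost_scsi_flush() swaps in the other counter of
 * the inflights[2] pair and waits for the old one to drain to zero.
 */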
static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}
static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_tpg *tpg = tmf->tpg;
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	mutex_lock(&tpg->tv_tpg_mutex);
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);
	vhost_scsi_put_inflight(inflight);
}
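
/*
 * TCM release callback, shared by regular commands and TMF responses.
 * Actual completion is deferred to vhost work so that it runs with the
 * owner process mm, which is required for vring access.
 */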
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}
static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}
/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;

			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, vs->compl_bitmap);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
		< vs->dev.nvqs)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
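
/*
 * Commands are preallocated per virtqueue by vhost_scsi_setup_vq_cmds();
 * here we only claim a free tag from the sbitmap and reinitialize that
 * slot, so the submission path does not allocate memory.
 */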
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct iovec *tvc_resp_iov;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	tvc_resp_iov = cmd->tvc_resp_iov;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);
	cmd->tvc_resp_iov = tvc_resp_iov;

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);

		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}

	return npages;
}
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);

				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}

	return 0;
}
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}
static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_queue_submission(se_cmd);
}
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for a
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}
static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}
static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}
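
/*
 * virtio-scsi uses an 8-byte LUN field: byte 0 must be 1, byte 1 is the
 * target, and bytes 2-3 carry the LUN in SAM flat addressing (0x4000 |
 * lun, big-endian). For example, the Linux guest driver encodes LUN 5 as
 * { 1, target, 0x40, 0x05, 0, 0, 0, 0 }.
 */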
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, i, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);
	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		for (i = 0; i < vc.in; i++)
			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(cmd);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}
static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}
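
/*
 * Handle a TMF request from the guest. Only LUN RESET is supported; it is
 * serviced with a vhost_scsi_tmf that was preallocated at LUN link time
 * and parked on tpg->tmf_queue, so no allocation happens on this path.
 */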
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	if (list_empty(&tpg->tmf_queue)) {
		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
		mutex_unlock(&tpg->tv_tpg_mutex);
		goto send_reject;
	}

	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);
	mutex_unlock(&tpg->tv_tpg_mutex);

	tmf->tpg = tpg;
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}
		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		    struct vhost_scsi_tpg *tpg,
		    struct se_lun *lun,
		    u32 event,
		    u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
		kfree(tv_cmd->tvc_resp_iov);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}
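
/*
 * Preallocate one command slot per vring descriptor (callers pass
 * max_cmds == vq->num), each with its own scatterlist, page-pointer and
 * response-iovec arrays, so that command submission never allocates.
 */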
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
					       sizeof(struct iovec),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_resp_iov) {
			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);
	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			tpg->tv_tpg_vhost_count--;
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, NULL);
			mutex_unlock(&vq->mutex);
		}
		/* Make sure cmds are not running before tearing them down. */
		vhost_scsi_flush(vs);

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			vhost_scsi_destroy_vq_cmds(vq);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
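
/*
 * A device always has the two fixed virtqueues (control, event) plus up
 * to max_io_vqs I/O virtqueues, so nvqs becomes vhost_scsi_max_io_vqs +
 * VHOST_SCSI_VQ_IO after the validation below.
 */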
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;

	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		goto err_vs;

	if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
		pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
		       VHOST_SCSI_MAX_IO_VQ);
		nvqs = VHOST_SCSI_MAX_IO_VQ;
	} else if (nvqs == 0) {
		pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
		nvqs = 1;
	}
	nvqs += VHOST_SCSI_VQ_IO;

	vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
	if (!vs->compl_bitmap)
		goto err_compl_bitmap;

	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
					 GFP_KERNEL | __GFP_ZERO);
	if (!vs->old_inflight)
		goto err_inflight;

	vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
				GFP_KERNEL | __GFP_ZERO);
	if (!vs->vqs)
		goto err_vqs;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_local_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_local_vqs:
	kfree(vs->vqs);
err_vqs:
	kfree(vs->old_inflight);
err_inflight:
	bitmap_free(vs->compl_bitmap);
err_compl_bitmap:
	kvfree(vs);
err_vs:
	return r;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	kfree(vs->dev.vqs);
	kfree(vs->vqs);
	kfree(vs->old_inflight);
	bitmap_free(vs->compl_bitmap);
	kvfree(vs);
	return 0;
}
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};
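
/*
 * Registered as /dev/vhost-scsi; userspace (e.g. QEMU) opens it and
 * drives the device through the ioctls above.
 */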
static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};
static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}

static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
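
/*
 * Report LUN hotplug/hotunplug to the guest as a TRANSPORT_RESET event
 * with a RESCAN or REMOVED reason; the guest only sees it if it
 * negotiated VIRTIO_SCSI_F_HOTPLUG.
 */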
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		return -ENOMEM;
	INIT_LIST_HEAD(&tmf->queue_entry);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}

static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del(&tmf->queue_entry);
	kfree(tmf);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = sysfs_emit(page, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -ENAMETOOLONG;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};
static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);

	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->tmf_queue);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}
static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};
static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "vhost",
	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_index			= vhost_scsi_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
};
static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out:
	return ret;
}

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
}
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);