// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others
 * with requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec *tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_IO_VQ	1024
#define VHOST_SCSI_MAX_EVENT	128

static unsigned vhost_scsi_max_io_vqs = 128;
module_param_named(max_io_vqs, vhost_scsi_max_io_vqs, uint, 0644);
MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi device can support. The default is 128. The max is 1024.");

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue *vqs;
	unsigned long *compl_bitmap;
	struct vhost_scsi_inflight **old_inflight;

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

/*
 * Global mutex to protect vhost_scsi TPG list for vhost IOCTLs and LIO
 * configfs management operations.
 */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

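/*
 * Flip each virtqueue to a fresh inflight counter and, when asked, hand
 * back the old one so a flush can wait for its requests to drain.
 */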
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				  struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < vs->dev.nvqs; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	kfree(tmf);
	vhost_scsi_put_inflight(inflight);
}

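/*
 * Called by TCM when the se_cmd's last reference is dropped. TMF responses
 * are completed from their own work item; regular commands are batched onto
 * the device-wide completion list drained by vhost_scsi_complete_cmd_work().
 */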
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

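/*
 * Allocate a hotplug event. At most VHOST_SCSI_MAX_EVENT events may be
 * pending; past that (or on allocation failure) the event is dropped and
 * vs_events_missed is set so the guest is later told it missed events.
 */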
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

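/* Drain the pending event list and post each event to the event vq. */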
static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, vs->compl_bitmap);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
		< vs->dev.nvqs)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

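/*
 * Grab a free tag from the virtqueue's sbitmap and hand out the matching
 * pre-allocated command. The command is zeroed, but its pre-allocated
 * scatterlist, page and iovec arrays are carried over for reuse.
 */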
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct iovec *tvc_resp_iov;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	tvc_resp_iov = cmd->tvc_resp_iov;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);
	cmd->tvc_resp_iov = tvc_resp_iov;

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}

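/*
 * Count how many scatterlist entries an iov_iter will need, and check the
 * result against the pre-allocated SGL limit.
 */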
static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter_iov(iter)) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}

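/*
 * Map the protection (T10-PI) and data payloads for a command into its
 * pre-allocated scatterlists, pinning the backing guest pages.
 */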
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		  cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

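/* Translate a virtio-scsi task attribute to its TCM equivalent. */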
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_queue_submission(se_cmd);
}

static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new? Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}

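/*
 * Extract the 14-bit LUN from bytes 2-3 of the 8-byte virtio-scsi LUN
 * field (byte 0 is the fixed 0x1 marker, byte 1 is the target).
 */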
static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

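/*
 * Main I/O virtqueue handler: pull each request off the vq, parse the
 * virtio-scsi header (with or without T10-PI), map the data payload and
 * submit the command to the TCM backend.
 */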
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, i, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;

		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		for (i = 0; i < vc.in ; i++)
			cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(cmd);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

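/*
 * Handle a task-management request from the control queue. Only LUN RESET
 * is supported; anything else gets VIRTIO_SCSI_S_FUNCTION_REJECTED.
 */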
static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		goto send_reject;

	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}

static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}

static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}

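/*
 * Queue a hotplug event for the worker thread, encoding the TPG and LUN
 * into the virtio-scsi event LUN field when both are supplied.
 */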
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		   struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, vs->old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < vs->dev.nvqs; i++)
		kref_put(&vs->old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < vs->dev.nvqs; i++)
		wait_for_completion(&vs->old_inflight[i]->comp);
}

static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
		kfree(tv_cmd->tvc_resp_iov);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}

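/*
 * Pre-allocate the per-virtqueue command pool (tag bitmap, command array
 * and per-command SGL/page/iovec arrays) so the I/O path itself never
 * has to allocate memory.
 */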
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
					       sizeof(struct iovec),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_resp_iov) {
			pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * The lock nesting rule is:
 *  vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	mutex_lock(&vhost_scsi_mutex);
	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				mutex_unlock(&vhost_scsi_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&vhost_scsi_mutex);

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < vs->dev.nvqs; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			mutex_lock(&tpg->tv_tpg_mutex);
			tpg->vhost_scsi = NULL;
			tpg->tv_tpg_vhost_count--;
			mutex_unlock(&tpg->tv_tpg_mutex);
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	return ret;
}

683bd967
AH
1641static int
1642vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1643 struct vhost_scsi_target *t)
057cbf49 1644{
ab8edab1 1645 struct se_portal_group *se_tpg;
1a1ff825
NB
1646 struct vhost_scsi_tport *tv_tport;
1647 struct vhost_scsi_tpg *tpg;
4f7f46d3
AH
1648 struct vhost_virtqueue *vq;
1649 bool match = false;
67e18cf9
AH
1650 int index, ret, i;
1651 u8 target;
057cbf49
NB
1652
1653 mutex_lock(&vs->dev.mutex);
1654 /* Verify that ring has been setup correctly. */
1655 for (index = 0; index < vs->dev.nvqs; ++index) {
3ab2e420 1656 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
101998f6 1657 ret = -EFAULT;
038e0af4 1658 goto err_dev;
057cbf49
NB
1659 }
1660 }
4f7f46d3
AH
1661
1662 if (!vs->vs_tpg) {
f2b7daf5
AH
1663 ret = 0;
1664 goto err_dev;
4f7f46d3
AH
1665 }
1666
67e18cf9
AH
1667 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1668 target = i;
98718312
AH
1669 tpg = vs->vs_tpg[target];
1670 if (!tpg)
67e18cf9
AH
1671 continue;
1672
98718312 1673 tv_tport = tpg->tport;
67e18cf9
AH
1674 if (!tv_tport) {
1675 ret = -ENODEV;
9a10cb4d 1676 goto err_dev;
67e18cf9
AH
1677 }
1678
1679 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
98718312 1680 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
67e18cf9 1681 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
98718312 1682 tv_tport->tport_name, tpg->tport_tpgt,
67e18cf9
AH
1683 t->vhost_wwpn, t->vhost_tpgt);
1684 ret = -EINVAL;
9a10cb4d 1685 goto err_dev;
67e18cf9 1686 }
9a10cb4d
MC
1687 match = true;
1688 }
1689 if (!match)
1690 goto free_vs_tpg;
1691
1692 /* Prevent new cmds from starting and accessing the tpgs/sessions */
1693 for (i = 0; i < vs->dev.nvqs; i++) {
1694 vq = &vs->vqs[i].vq;
1695 mutex_lock(&vq->mutex);
1696 vhost_vq_set_backend(vq, NULL);
1697 mutex_unlock(&vq->mutex);
1698 }
1699 /* Make sure cmds are not running before tearing them down. */
1700 vhost_scsi_flush(vs);
1701
1702 for (i = 0; i < vs->dev.nvqs; i++) {
1703 vq = &vs->vqs[i].vq;
1704 vhost_scsi_destroy_vq_cmds(vq);
1705 }
1706
1707 /*
1708 * We can now release our hold on the tpg and sessions and userspace
1709 * can free them after this point.
1710 */
1711 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1712 target = i;
1713 tpg = vs->vs_tpg[target];
1714 if (!tpg)
1715 continue;
1716
1717 mutex_lock(&tpg->tv_tpg_mutex);
1718
1719 tpg->tv_tpg_vhost_count--;
1720 tpg->vhost_scsi = NULL;
67e18cf9 1721 vs->vs_tpg[target] = NULL;
9a10cb4d 1722
98718312 1723 mutex_unlock(&tpg->tv_tpg_mutex);
9a10cb4d 1724
ab8edab1 1725 se_tpg = &tpg->se_tpg;
d588cf8f 1726 target_undepend_item(&se_tpg->tpg_group.cg_item);
057cbf49 1727 }
d60146c1 1728
9a10cb4d 1729free_vs_tpg:
1730 /*
1731 * Act as synchronize_rcu to make sure access to
1732 * old vs->vs_tpg is finished.
1733 */
1734 vhost_scsi_flush(vs);
1735 kfree(vs->vs_tpg);
1736 vs->vs_tpg = NULL;
a6c9af87 1737 WARN_ON(vs->vs_events_nr);
057cbf49 1738 mutex_unlock(&vs->dev.mutex);
057cbf49 1739 return 0;
101998f6 1740
038e0af4 1741err_dev:
1742 mutex_unlock(&vs->dev.mutex);
1743 return ret;
1744}
1745
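/*
 * VHOST_SET_FEATURES handler: reject feature bits outside
 * VHOST_SCSI_FEATURES, require that the dirty log is set up before
 * VHOST_F_LOG_ALL can be acked, then publish the acked features to
 * every vq under its own mutex so the handlers see a consistent value.
 */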
1746static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1747{
1748 struct vhost_virtqueue *vq;
1749 int i;
1750
1751 if (features & ~VHOST_SCSI_FEATURES)
1752 return -EOPNOTSUPP;
1753
1754 mutex_lock(&vs->dev.mutex);
1755 if ((features & (1 << VHOST_F_LOG_ALL)) &&
1756 !vhost_log_access_ok(&vs->dev)) {
1757 mutex_unlock(&vs->dev.mutex);
1758 return -EFAULT;
1759 }
ea16c514 1760
f49c2226 1761 for (i = 0; i < vs->dev.nvqs; i++) {
1762 vq = &vs->vqs[i].vq;
1763 mutex_lock(&vq->mutex);
1764 vq->acked_features = features;
1765 mutex_unlock(&vq->mutex);
1766 }
1767 mutex_unlock(&vs->dev.mutex);
1768 return 0;
1769}
1770
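/*
 * Open of /dev/vhost-scsi: build a vhost_scsi instance with two fixed
 * virtqueues (control and event) plus a module-parameter-controlled
 * number of I/O virtqueues clamped to [1, VHOST_SCSI_MAX_IO_VQ]. vs
 * itself is allocated with kvzalloc() since it may be too large for
 * kmalloc().
 */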
1771static int vhost_scsi_open(struct inode *inode, struct file *f)
1772{
c7289312 1773 struct vhost_scsi *vs;
3ab2e420 1774 struct vhost_virtqueue **vqs;
f49c2226 1775 int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
057cbf49 1776
1777 vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
1778 if (!vs)
1779 goto err_vs;
057cbf49 1780
1781 if (nvqs > VHOST_SCSI_MAX_IO_VQ) {
1782 pr_err("Invalid max_io_vqs of %d. Using %d.\n", nvqs,
1783 VHOST_SCSI_MAX_IO_VQ);
1784 nvqs = VHOST_SCSI_MAX_IO_VQ;
 1785	} else if (nvqs <= 0) {
1786 pr_err("Invalid max_io_vqs of %d. Using 1.\n", nvqs);
1787 nvqs = 1;
1788 }
1789 nvqs += VHOST_SCSI_VQ_IO;
1790
1791 vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
1792 if (!vs->compl_bitmap)
1793 goto err_compl_bitmap;
1794
1795 vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
1796 GFP_KERNEL | __GFP_ZERO);
1797 if (!vs->old_inflight)
1798 goto err_inflight;
1799
1800 vs->vqs = kmalloc_array(nvqs, sizeof(*vs->vqs),
1801 GFP_KERNEL | __GFP_ZERO);
1802 if (!vs->vqs)
595cb754 1803 goto err_vqs;
3ab2e420 1804
1805 vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
1806 if (!vqs)
1807 goto err_local_vqs;
1808
c7289312 1809 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1a1ff825 1810 vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
a6c9af87 1811
1812 vs->vs_events_nr = 0;
1813 vs->vs_events_missed = false;
057cbf49 1814
1815 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1816 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1817 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1818 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
f49c2226 1819 for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
1820 vqs[i] = &vs->vqs[i].vq;
1821 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
3ab2e420 1822 }
f49c2226 1823 vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
01fcb1cb 1824 VHOST_SCSI_WEIGHT, 0, true, NULL);
f2f0173d 1825
1a1ff825 1826 vhost_scsi_init_inflight(vs, NULL);
f2f0173d 1827
c7289312 1828 f->private_data = vs;
057cbf49 1829 return 0;
595cb754 1830
1831err_local_vqs:
1832 kfree(vs->vqs);
595cb754 1833err_vqs:
1834 kfree(vs->old_inflight);
1835err_inflight:
1836 bitmap_free(vs->compl_bitmap);
1837err_compl_bitmap:
68404441 1838 kvfree(vs);
1839err_vs:
1840 return r;
1841}
1842
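/*
 * Final release: clear whatever endpoint is still bound (using the
 * WWPN saved on the device), stop and clean up the vhost dev, then
 * free everything vhost_scsi_open() allocated, in reverse order.
 */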
1843static int vhost_scsi_release(struct inode *inode, struct file *f)
1844{
c7289312 1845 struct vhost_scsi *vs = f->private_data;
67e18cf9 1846 struct vhost_scsi_target t;
057cbf49 1847
1848 mutex_lock(&vs->dev.mutex);
1849 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1850 mutex_unlock(&vs->dev.mutex);
1851 vhost_scsi_clear_endpoint(vs, &t);
1852 vhost_dev_stop(&vs->dev);
f6f93f75 1853 vhost_dev_cleanup(&vs->dev);
c7289312 1854 kfree(vs->dev.vqs);
1855 kfree(vs->vqs);
1856 kfree(vs->old_inflight);
1857 bitmap_free(vs->compl_bitmap);
68404441 1858 kvfree(vs);
1859 return 0;
1860}
1861
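/*
 * Ioctl multiplexer: the vhost-scsi specific ioctls bind/unbind a
 * target (SET/CLEAR_ENDPOINT), expose lost-event state (the
 * EVENTS_MISSED pair) and negotiate features; everything else falls
 * through to the generic vhost dev/vring ioctls. A minimal userspace
 * sketch (illustrative only; "vhost_fd" and "wwpn" are placeholders
 * and error handling is omitted):
 *
 *	struct vhost_scsi_target backend = { 0 };
 *
 *	strncpy(backend.vhost_wwpn, wwpn, sizeof(backend.vhost_wwpn) - 1);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	...
 *	ioctl(vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */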
1862static long
1863vhost_scsi_ioctl(struct file *f,
1864 unsigned int ioctl,
1865 unsigned long arg)
1866{
1867 struct vhost_scsi *vs = f->private_data;
1868 struct vhost_scsi_target backend;
1869 void __user *argp = (void __user *)arg;
1870 u64 __user *featurep = argp;
1871 u32 __user *eventsp = argp;
1872 u32 events_missed;
057cbf49 1873 u64 features;
101998f6 1874 int r, abi_version = VHOST_SCSI_ABI_VERSION;
3ab2e420 1875 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1876
1877 switch (ioctl) {
1878 case VHOST_SCSI_SET_ENDPOINT:
1879 if (copy_from_user(&backend, argp, sizeof backend))
1880 return -EFAULT;
1881 if (backend.reserved != 0)
1882 return -EOPNOTSUPP;
1883
1884 return vhost_scsi_set_endpoint(vs, &backend);
1885 case VHOST_SCSI_CLEAR_ENDPOINT:
1886 if (copy_from_user(&backend, argp, sizeof backend))
1887 return -EFAULT;
1888 if (backend.reserved != 0)
1889 return -EOPNOTSUPP;
1890
1891 return vhost_scsi_clear_endpoint(vs, &backend);
1892 case VHOST_SCSI_GET_ABI_VERSION:
101998f6 1893 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1894 return -EFAULT;
1895 return 0;
1896 case VHOST_SCSI_SET_EVENTS_MISSED:
1897 if (get_user(events_missed, eventsp))
1898 return -EFAULT;
1899 mutex_lock(&vq->mutex);
1900 vs->vs_events_missed = events_missed;
1901 mutex_unlock(&vq->mutex);
1902 return 0;
1903 case VHOST_SCSI_GET_EVENTS_MISSED:
1904 mutex_lock(&vq->mutex);
1905 events_missed = vs->vs_events_missed;
1906 mutex_unlock(&vq->mutex);
1907 if (put_user(events_missed, eventsp))
1908 return -EFAULT;
1909 return 0;
057cbf49 1910 case VHOST_GET_FEATURES:
5dade710 1911 features = VHOST_SCSI_FEATURES;
1912 if (copy_to_user(featurep, &features, sizeof features))
1913 return -EFAULT;
1914 return 0;
1915 case VHOST_SET_FEATURES:
1916 if (copy_from_user(&features, featurep, sizeof features))
1917 return -EFAULT;
1918 return vhost_scsi_set_features(vs, features);
1919 default:
1920 mutex_lock(&vs->dev.mutex);
1921 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1922 /* TODO: flush backend after dev ioctl. */
1923 if (r == -ENOIOCTLCMD)
1924 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1925 mutex_unlock(&vs->dev.mutex);
1926 return r;
1927 }
1928}
1929
1930static const struct file_operations vhost_scsi_fops = {
1931 .owner = THIS_MODULE,
1932 .release = vhost_scsi_release,
1933 .unlocked_ioctl = vhost_scsi_ioctl,
407e9ef7 1934 .compat_ioctl = compat_ptr_ioctl,
1935 .open = vhost_scsi_open,
1936 .llseek = noop_llseek,
1937};
1938
1939static struct miscdevice vhost_scsi_misc = {
1940 MISC_DYNAMIC_MINOR,
1941 "vhost-scsi",
1942 &vhost_scsi_fops,
1943};
1944
1945static int __init vhost_scsi_register(void)
1946{
1947 return misc_register(&vhost_scsi_misc);
1948}
1949
f368ed60 1950static void vhost_scsi_deregister(void)
057cbf49 1951{
f368ed60 1952 misc_deregister(&vhost_scsi_misc);
1953}
1954
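/* Human-readable transport name for a tport; used only in log messages. */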
1a1ff825 1955static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1956{
1957 switch (tport->tport_proto_id) {
1958 case SCSI_PROTOCOL_SAS:
1959 return "SAS";
1960 case SCSI_PROTOCOL_FCP:
1961 return "FCP";
1962 case SCSI_PROTOCOL_ISCSI:
1963 return "iSCSI";
1964 default:
1965 break;
1966 }
1967
1968 return "Unknown";
1969}
1970
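/*
 * Tell the guest a LUN was mapped or unmapped by queueing a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event with reason RESCAN (plug) or
 * REMOVED (unplug). The event is only queued if the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG and the event vq still has a backend.
 */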
683bd967 1971static void
1a1ff825 1972vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
683bd967 1973 struct se_lun *lun, bool plug)
1974{
1975
1976 struct vhost_scsi *vs = tpg->vhost_scsi;
1977 struct vhost_virtqueue *vq;
1978 u32 reason;
1979
1980 if (!vs)
1981 return;
1982
1983 if (plug)
1984 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1985 else
1986 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1987
3ab2e420 1988 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
a6c9af87 1989 mutex_lock(&vq->mutex);
1990 /*
1991 * We can't queue events if the backend has been cleared, because
1992 * we could end up queueing an event after the flush.
1993 */
1994 if (!vhost_vq_get_backend(vq))
1995 goto unlock;
1996
ea16c514 1997 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1a1ff825 1998 vhost_scsi_send_evt(vs, tpg, lun,
ea16c514 1999 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
ced9eb37 2000unlock:
a6c9af87 2001 mutex_unlock(&vq->mutex);
2002}
2003
1a1ff825 2004static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
a6c9af87 2005{
1a1ff825 2006 vhost_scsi_do_plug(tpg, lun, true);
2007}
2008
1a1ff825 2009static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
a6c9af87 2010{
1a1ff825 2011 vhost_scsi_do_plug(tpg, lun, false);
2012}
2013
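/*
 * configfs LUN map/unmap callbacks: adjust the tpg's port count and
 * emit the matching hotplug event, both under tv_tpg_mutex so that a
 * plug and an unplug of the same tpg cannot race.
 */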
1a1ff825 2014static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
683bd967 2015 struct se_lun *lun)
057cbf49 2016{
2017 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2018 struct vhost_scsi_tpg, se_tpg);
057cbf49 2019
2020 mutex_lock(&tpg->tv_tpg_mutex);
2021 tpg->tv_tpg_port_count++;
1a1ff825 2022 vhost_scsi_hotplug(tpg, lun);
f5ed6f9e 2023 mutex_unlock(&tpg->tv_tpg_mutex);
a6c9af87 2024
2025 return 0;
2026}
2027
1a1ff825 2028static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
683bd967 2029 struct se_lun *lun)
057cbf49 2030{
2031 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2032 struct vhost_scsi_tpg, se_tpg);
057cbf49 2033
2034 mutex_lock(&tpg->tv_tpg_mutex);
2035 tpg->tv_tpg_port_count--;
1a1ff825 2036 vhost_scsi_hotunplug(tpg, lun);
f5ed6f9e 2037 mutex_unlock(&tpg->tv_tpg_mutex);
2038}
2039
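/*
 * The tpg "fabric_prot_type" attribute. Only the values 0, 1 and 3 are
 * accepted here, presumably mirroring the protection-type values used
 * by the target core; anything else is rejected with -EINVAL.
 */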
2040static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
2041 struct config_item *item, const char *page, size_t count)
b1d75fe5 2042{
2eafd729 2043 struct se_portal_group *se_tpg = attrib_to_tpg(item);
2044 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2045 struct vhost_scsi_tpg, se_tpg);
2046 unsigned long val;
2047 int ret = kstrtoul(page, 0, &val);
2048
2049 if (ret) {
2050 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
2051 return ret;
2052 }
2053 if (val != 0 && val != 1 && val != 3) {
2054 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
2055 return -EINVAL;
2056 }
2057 tpg->tv_fabric_prot_type = val;
2058
2059 return count;
2060}
2061
2062static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
2063 struct config_item *item, char *page)
b1d75fe5 2064{
2eafd729 2065 struct se_portal_group *se_tpg = attrib_to_tpg(item);
2066 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2067 struct vhost_scsi_tpg, se_tpg);
2068
b3d4f02e 2069 return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
b1d75fe5 2070}
2071
2072CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
2073
2074static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
2eafd729 2075 &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
b1d75fe5
NB
2076 NULL,
2077};
2078
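/*
 * Create the tpg's single I_T nexus: allocate it and open a target
 * core session with DIN/DOUT protection passthrough, using the
 * configfs directory name as the initiator port name. Fails with
 * -EEXIST if the tpg already has a nexus.
 */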
2079static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2080 const char *name)
2081{
2082 struct vhost_scsi_nexus *tv_nexus;
2083
2084 mutex_lock(&tpg->tv_tpg_mutex);
2085 if (tpg->tpg_nexus) {
2086 mutex_unlock(&tpg->tv_tpg_mutex);
2087 pr_debug("tpg->tpg_nexus already exists\n");
2088 return -EEXIST;
2089 }
65ea7898 2090
473f0b15 2091 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
2092 if (!tv_nexus) {
2093 mutex_unlock(&tpg->tv_tpg_mutex);
2094 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
2095 return -ENOMEM;
2096 }
 2097	/*
 2098	 * Since we are running in 'demo mode' this call will generate a
1a1ff825 2099 * struct se_node_acl for the vhost_scsi struct se_portal_group with
2100 * the SCSI Initiator port name of the passed configfs group 'name'.
2101 */
25b98b64 2102 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
65ea7898 2103 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
25b98b64 2104 (unsigned char *)name, tv_nexus, NULL);
65ea7898 2105 if (IS_ERR(tv_nexus->tvn_se_sess)) {
98718312 2106 mutex_unlock(&tpg->tv_tpg_mutex);
2107 kfree(tv_nexus);
2108 return -ENOMEM;
057cbf49 2109 }
98718312 2110 tpg->tpg_nexus = tv_nexus;
057cbf49 2111
98718312 2112 mutex_unlock(&tpg->tv_tpg_mutex);
2113 return 0;
2114}
2115
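/*
 * Tear down the tpg's I_T nexus. Refused with -EBUSY while LUNs are
 * still mapped (port count) or the tpg is still bound to a vhost
 * device (vhost count); otherwise the target core session is removed
 * and the nexus freed.
 */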
1a1ff825 2116static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2117{
2118 struct se_session *se_sess;
1a1ff825 2119 struct vhost_scsi_nexus *tv_nexus;
2120
2121 mutex_lock(&tpg->tv_tpg_mutex);
2122 tv_nexus = tpg->tpg_nexus;
2123 if (!tv_nexus) {
2124 mutex_unlock(&tpg->tv_tpg_mutex);
2125 return -ENODEV;
2126 }
2127
2128 se_sess = tv_nexus->tvn_se_sess;
2129 if (!se_sess) {
2130 mutex_unlock(&tpg->tv_tpg_mutex);
2131 return -ENODEV;
2132 }
2133
101998f6 2134 if (tpg->tv_tpg_port_count != 0) {
057cbf49 2135 mutex_unlock(&tpg->tv_tpg_mutex);
101998f6 2136 pr_err("Unable to remove TCM_vhost I_T Nexus with"
057cbf49 2137 " active TPG port count: %d\n",
2138 tpg->tv_tpg_port_count);
2139 return -EBUSY;
2140 }
2141
101998f6 2142 if (tpg->tv_tpg_vhost_count != 0) {
057cbf49 2143 mutex_unlock(&tpg->tv_tpg_mutex);
101998f6 2144 pr_err("Unable to remove TCM_vhost I_T Nexus with"
057cbf49 2145 " active TPG vhost count: %d\n",
2146 tpg->tv_tpg_vhost_count);
2147 return -EBUSY;
2148 }
2149
101998f6 2150 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1a1ff825 2151 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
057cbf49 2152 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
3aee26b4 2153
057cbf49 2154 /*
101998f6 2155 * Release the SCSI I_T Nexus to the emulated vhost Target Port
057cbf49 2156 */
25b88550 2157 target_remove_session(se_sess);
2158 tpg->tpg_nexus = NULL;
2159 mutex_unlock(&tpg->tv_tpg_mutex);
2160
2161 kfree(tv_nexus);
2162 return 0;
2163}
2164
2eafd729 2165static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
057cbf49 2166{
2eafd729 2167 struct se_portal_group *se_tpg = to_tpg(item);
2168 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2169 struct vhost_scsi_tpg, se_tpg);
2170 struct vhost_scsi_nexus *tv_nexus;
2171 ssize_t ret;
2172
2173 mutex_lock(&tpg->tv_tpg_mutex);
2174 tv_nexus = tpg->tpg_nexus;
057cbf49 2175 if (!tv_nexus) {
98718312 2176 mutex_unlock(&tpg->tv_tpg_mutex);
2177 return -ENODEV;
2178 }
b3d4f02e 2179 ret = sysfs_emit(page, "%s\n",
057cbf49 2180 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
98718312 2181 mutex_unlock(&tpg->tv_tpg_mutex);
2182
2183 return ret;
2184}
2185
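/*
 * Writing "NULL" to the nexus attribute drops the active nexus; any
 * other value must carry a naa./fc./iqn. prefix that matches the
 * tport's protocol before a nexus is created. For example (the
 * configfs path below is illustrative):
 *
 *	echo naa.60014054cf3a5e90 > \
 *		/sys/kernel/config/target/vhost/<wwpn>/tpgt_1/nexus
 */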
2186static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
2187 const char *page, size_t count)
057cbf49 2188{
2eafd729 2189 struct se_portal_group *se_tpg = to_tpg(item);
2190 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2191 struct vhost_scsi_tpg, se_tpg);
2192 struct vhost_scsi_tport *tport_wwn = tpg->tport;
2193 unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
2194 int ret;
2195 /*
2196 * Shutdown the active I_T nexus if 'NULL' is passed..
2197 */
2198 if (!strncmp(page, "NULL", 4)) {
1a1ff825 2199 ret = vhost_scsi_drop_nexus(tpg);
2200 return (!ret) ? count : ret;
2201 }
2202 /*
2203 * Otherwise make sure the passed virtual Initiator port WWN matches
2204 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2205 * vhost_scsi_make_nexus().
057cbf49 2206 */
1a1ff825 2207 if (strlen(page) >= VHOST_SCSI_NAMELEN) {
057cbf49 2208		pr_err("Emulated NAA SAS Address: %s, exceeds"
1a1ff825 2209 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2210 return -EINVAL;
2211 }
1a1ff825 2212 snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2213
2214 ptr = strstr(i_port, "naa.");
2215 if (ptr) {
2216 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2217 pr_err("Passed SAS Initiator Port %s does not"
2218 " match target port protoid: %s\n", i_port,
1a1ff825 2219 vhost_scsi_dump_proto_id(tport_wwn));
2220 return -EINVAL;
2221 }
2222 port_ptr = &i_port[0];
2223 goto check_newline;
2224 }
2225 ptr = strstr(i_port, "fc.");
2226 if (ptr) {
2227 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2228 pr_err("Passed FCP Initiator Port %s does not"
2229 " match target port protoid: %s\n", i_port,
1a1ff825 2230 vhost_scsi_dump_proto_id(tport_wwn));
2231 return -EINVAL;
2232 }
2233 port_ptr = &i_port[3]; /* Skip over "fc." */
2234 goto check_newline;
2235 }
2236 ptr = strstr(i_port, "iqn.");
2237 if (ptr) {
2238 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2239 pr_err("Passed iSCSI Initiator Port %s does not"
2240 " match target port protoid: %s\n", i_port,
1a1ff825 2241 vhost_scsi_dump_proto_id(tport_wwn));
2242 return -EINVAL;
2243 }
2244 port_ptr = &i_port[0];
2245 goto check_newline;
2246 }
2247 pr_err("Unable to locate prefix for emulated Initiator Port:"
2248 " %s\n", i_port);
2249 return -EINVAL;
2250 /*
2251 * Clear any trailing newline for the NAA WWN
2252 */
2253check_newline:
2254 if (i_port[strlen(i_port)-1] == '\n')
2255 i_port[strlen(i_port)-1] = '\0';
2256
1a1ff825 2257 ret = vhost_scsi_make_nexus(tpg, port_ptr);
2258 if (ret < 0)
2259 return ret;
2260
2261 return count;
2262}
2263
2eafd729 2264CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
057cbf49 2265
1a1ff825 2266static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2eafd729 2267 &vhost_scsi_tpg_attr_nexus,
2268 NULL,
2269};
2270
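/*
 * configfs callbacks for the tpgt_<n> directories under a tport: parse
 * the tag out of the directory name, register the tpg with the target
 * core under the tport's protocol id, and keep it on vhost_scsi_list
 * where vhost_scsi_set_endpoint() can find it.
 */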
683bd967 2271static struct se_portal_group *
aa090eab 2272vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
057cbf49 2273{
2274 struct vhost_scsi_tport *tport = container_of(wwn,
2275 struct vhost_scsi_tport, tport_wwn);
057cbf49 2276
1a1ff825 2277 struct vhost_scsi_tpg *tpg;
59c816c1 2278 u16 tpgt;
2279 int ret;
2280
2281 if (strstr(name, "tpgt_") != name)
2282 return ERR_PTR(-EINVAL);
59c816c1 2283 if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2284 return ERR_PTR(-EINVAL);
2285
473f0b15 2286 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
057cbf49 2287 if (!tpg) {
1a1ff825 2288 pr_err("Unable to allocate struct vhost_scsi_tpg");
2289 return ERR_PTR(-ENOMEM);
2290 }
2291 mutex_init(&tpg->tv_tpg_mutex);
2292 INIT_LIST_HEAD(&tpg->tv_tpg_list);
2293 tpg->tport = tport;
2294 tpg->tport_tpgt = tpgt;
2295
bc0c94b1 2296 ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2297 if (ret < 0) {
2298 kfree(tpg);
2299 return NULL;
2300 }
2301 mutex_lock(&vhost_scsi_mutex);
2302 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2303 mutex_unlock(&vhost_scsi_mutex);
2304
2305 return &tpg->se_tpg;
2306}
2307
1a1ff825 2308static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
057cbf49 2309{
2310 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2311 struct vhost_scsi_tpg, se_tpg);
057cbf49 2312
1a1ff825 2313 mutex_lock(&vhost_scsi_mutex);
057cbf49 2314 list_del(&tpg->tv_tpg_list);
1a1ff825 2315 mutex_unlock(&vhost_scsi_mutex);
057cbf49 2316 /*
101998f6 2317 * Release the virtual I_T Nexus for this vhost TPG
057cbf49 2318 */
1a1ff825 2319 vhost_scsi_drop_nexus(tpg);
2320 /*
2321 * Deregister the se_tpg from TCM..
2322 */
2323 core_tpg_deregister(se_tpg);
2324 kfree(tpg);
2325}
2326
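/*
 * Create a tport for a new WWN directory, deriving the emulated
 * transport from the name's prefix: "naa." -> SAS, "fc." -> FCP (with
 * the prefix stripped from the stored name) and "iqn." -> iSCSI.
 */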
683bd967 2327static struct se_wwn *
1a1ff825 2328vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2329 struct config_group *group,
2330 const char *name)
057cbf49 2331{
1a1ff825 2332 struct vhost_scsi_tport *tport;
2333 char *ptr;
2334 u64 wwpn = 0;
2335 int off = 0;
2336
1a1ff825 2337 /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2338 return ERR_PTR(-EINVAL); */
2339
473f0b15 2340 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
057cbf49 2341 if (!tport) {
1a1ff825 2342 pr_err("Unable to allocate struct vhost_scsi_tport");
057cbf49
NB
2343 return ERR_PTR(-ENOMEM);
2344 }
2345 tport->tport_wwpn = wwpn;
2346 /*
2347 * Determine the emulated Protocol Identifier and Target Port Name
2348 * based on the incoming configfs directory name.
2349 */
2350 ptr = strstr(name, "naa.");
2351 if (ptr) {
2352 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2353 goto check_len;
2354 }
2355 ptr = strstr(name, "fc.");
2356 if (ptr) {
2357 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2358 off = 3; /* Skip over "fc." */
2359 goto check_len;
2360 }
2361 ptr = strstr(name, "iqn.");
2362 if (ptr) {
2363 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2364 goto check_len;
2365 }
2366
2367 pr_err("Unable to locate prefix for emulated Target Port:"
2368 " %s\n", name);
2369 kfree(tport);
2370 return ERR_PTR(-EINVAL);
2371
2372check_len:
1a1ff825 2373 if (strlen(name) >= VHOST_SCSI_NAMELEN) {
057cbf49 2374 pr_err("Emulated %s Address: %s, exceeds"
 2375		" max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2376 VHOST_SCSI_NAMELEN);
2377 kfree(tport);
2378 return ERR_PTR(-EINVAL);
2379 }
1a1ff825 2380 snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2381
2382 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1a1ff825 2383 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2384
2385 return &tport->tport_wwn;
2386}
2387
1a1ff825 2388static void vhost_scsi_drop_tport(struct se_wwn *wwn)
057cbf49 2389{
2390 struct vhost_scsi_tport *tport = container_of(wwn,
2391 struct vhost_scsi_tport, tport_wwn);
2392
2393 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1a1ff825 2394 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2395 tport->tport_name);
2396
2397 kfree(tport);
2398}
2399
683bd967 2400static ssize_t
2eafd729 2401vhost_scsi_wwn_version_show(struct config_item *item, char *page)
057cbf49 2402{
b3d4f02e 2403 return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s"
1a1ff825 2404		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2405 utsname()->machine);
2406}
2407
2eafd729 2408CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
057cbf49 2409
1a1ff825 2410static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2eafd729 2411 &vhost_scsi_wwn_attr_version,
057cbf49
NB
2412 NULL,
2413};
2414
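/*
 * The fabric template gluing this file into the target core: the
 * configfs layout above (wwn/tpg/attrib attributes and the make/drop
 * callbacks) plus the I/O and session callbacks defined earlier in
 * the file.
 */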
1d822a40 2415static const struct target_core_fabric_ops vhost_scsi_ops = {
9ac8928e 2416 .module = THIS_MODULE,
30c7ca93 2417 .fabric_name = "vhost",
5ae6a6a9 2418 .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
2419 .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
2420 .tpg_get_tag = vhost_scsi_get_tpgt,
2421 .tpg_check_demo_mode = vhost_scsi_check_true,
2422 .tpg_check_demo_mode_cache = vhost_scsi_check_true,
b1d75fe5 2423 .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
1a1ff825 2424 .release_cmd = vhost_scsi_release_cmd,
084ed45b 2425 .check_stop_free = vhost_scsi_check_stop_free,
057cbf49 2426 .sess_get_initiator_sid = NULL,
1a1ff825 2427 .write_pending = vhost_scsi_write_pending,
2428 .queue_data_in = vhost_scsi_queue_data_in,
2429 .queue_status = vhost_scsi_queue_status,
2430 .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
2431 .aborted_task = vhost_scsi_aborted_task,
2432 /*
2433 * Setup callers for generic logic in target_core_fabric_configfs.c
2434 */
2435 .fabric_make_wwn = vhost_scsi_make_tport,
2436 .fabric_drop_wwn = vhost_scsi_drop_tport,
2437 .fabric_make_tpg = vhost_scsi_make_tpg,
2438 .fabric_drop_tpg = vhost_scsi_drop_tpg,
2439 .fabric_post_link = vhost_scsi_port_link,
2440 .fabric_pre_unlink = vhost_scsi_port_unlink,
2441
2442 .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
2443 .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
2444 .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
2445};
2446
9ac8928e 2447static int __init vhost_scsi_init(void)
057cbf49 2448{
9ac8928e 2449 int ret = -ENOMEM;
057cbf49 2450
9ac8928e 2451 pr_debug("TCM_VHOST fabric module %s on %s/%s"
1a1ff825 2452 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
057cbf49 2453 utsname()->machine);
057cbf49 2454
2455 ret = vhost_scsi_register();
2456 if (ret < 0)
6ec29cb8 2457 goto out;
057cbf49 2458
9ac8928e 2459 ret = target_register_template(&vhost_scsi_ops);
2460 if (ret < 0)
2461 goto out_vhost_scsi_deregister;
2462
2463 return 0;
2464
2465out_vhost_scsi_deregister:
2466 vhost_scsi_deregister();
2467out:
2468 return ret;
 2469}
2470
1a1ff825 2471static void vhost_scsi_exit(void)
057cbf49 2472{
9ac8928e 2473 target_unregister_template(&vhost_scsi_ops);
057cbf49 2474 vhost_scsi_deregister();
 2475}
2476
2477MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2478MODULE_ALIAS("tcm_vhost");
057cbf49 2479MODULE_LICENSE("GPL");
2480module_init(vhost_scsi_init);
2481module_exit(vhost_scsi_exit);