tcm_vhost: Introduce iov_num_pages
[linux-block.git] drivers/vhost/tcm_vhost.c
1/*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
3 *
4 * (C) Copyright 2010-2012 RisingTide Systems LLC.
5 * (C) Copyright 2010-2012 IBM Corp.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/kthread.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/configfs.h>
34#include <linux/ctype.h>
35#include <linux/compat.h>
36#include <linux/eventfd.h>
37#include <linux/fs.h>
38#include <linux/miscdevice.h>
39#include <asm/unaligned.h>
40#include <scsi/scsi.h>
41#include <scsi/scsi_tcq.h>
42#include <target/target_core_base.h>
43#include <target/target_core_fabric.h>
44#include <target/target_core_fabric_configfs.h>
45#include <target/target_core_configfs.h>
46#include <target/configfs_macros.h>
47#include <linux/vhost.h>
48#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
49#include <linux/virtio_scsi.h>
50#include <linux/llist.h>
51
52#include "vhost.c"
53#include "vhost.h"
54#include "tcm_vhost.h"
55
56enum {
57 VHOST_SCSI_VQ_CTL = 0,
58 VHOST_SCSI_VQ_EVT = 1,
59 VHOST_SCSI_VQ_IO = 2,
60};
61
62struct vhost_scsi {
63 struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
64 struct vhost_dev dev;
65 struct vhost_virtqueue vqs[3];
66
67 struct vhost_work vs_completion_work; /* cmd completion work item */
68 struct llist_head vs_completion_list; /* cmd completion queue */
69};
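/*
 * Note (descriptive): only vqs[VHOST_SCSI_VQ_IO] gets a real request
 * handler; vhost_scsi_handle_vq(), vhost_add_used() and vhost_signal()
 * below all reference that queue directly as &vs->vqs[2].
 */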
70
71/* Local pointer to allocated TCM configfs fabric module */
72static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
73
74static struct workqueue_struct *tcm_vhost_workqueue;
75
76/* Global mutex to protect tcm_vhost TPG list for vhost IOCTL access */
77static DEFINE_MUTEX(tcm_vhost_mutex);
78static LIST_HEAD(tcm_vhost_list);
79
80static int iov_num_pages(struct iovec *iov)
81{
82 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
83 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
84}
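/*
 * Worked example (illustrative, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * an iovec with iov_base = (void *)0x10ffc and iov_len = 0x10 straddles
 * a page boundary, so
 *
 *	PAGE_ALIGN(0x10ffc + 0x10)	= 0x12000
 *	0x10ffc & PAGE_MASK		= 0x10000
 *	(0x12000 - 0x10000) >> 12	= 2 pages
 */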
85
86static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
87{
88 return 1;
89}
90
91static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
92{
93 return 0;
94}
95
96static char *tcm_vhost_get_fabric_name(void)
97{
98 return "vhost";
99}
100
101static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
102{
103 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
104 struct tcm_vhost_tpg, se_tpg);
105 struct tcm_vhost_tport *tport = tpg->tport;
106
107 switch (tport->tport_proto_id) {
108 case SCSI_PROTOCOL_SAS:
109 return sas_get_fabric_proto_ident(se_tpg);
110 case SCSI_PROTOCOL_FCP:
111 return fc_get_fabric_proto_ident(se_tpg);
112 case SCSI_PROTOCOL_ISCSI:
113 return iscsi_get_fabric_proto_ident(se_tpg);
114 default:
115 pr_err("Unknown tport_proto_id: 0x%02x, using"
116 " SAS emulation\n", tport->tport_proto_id);
117 break;
118 }
119
120 return sas_get_fabric_proto_ident(se_tpg);
121}
122
123static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
124{
125 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
126 struct tcm_vhost_tpg, se_tpg);
127 struct tcm_vhost_tport *tport = tpg->tport;
128
129 return &tport->tport_name[0];
130}
131
132static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
133{
134 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
135 struct tcm_vhost_tpg, se_tpg);
136 return tpg->tport_tpgt;
137}
138
139static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
140{
141 return 1;
142}
143
144static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
145 struct se_node_acl *se_nacl,
146 struct t10_pr_registration *pr_reg,
147 int *format_code,
148 unsigned char *buf)
149{
150 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
151 struct tcm_vhost_tpg, se_tpg);
152 struct tcm_vhost_tport *tport = tpg->tport;
153
154 switch (tport->tport_proto_id) {
155 case SCSI_PROTOCOL_SAS:
156 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
157 format_code, buf);
158 case SCSI_PROTOCOL_FCP:
159 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
160 format_code, buf);
161 case SCSI_PROTOCOL_ISCSI:
162 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
163 format_code, buf);
164 default:
165 pr_err("Unknown tport_proto_id: 0x%02x, using"
166 " SAS emulation\n", tport->tport_proto_id);
167 break;
168 }
169
170 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
171 format_code, buf);
172}
173
174static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
175 struct se_node_acl *se_nacl,
176 struct t10_pr_registration *pr_reg,
177 int *format_code)
178{
179 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
180 struct tcm_vhost_tpg, se_tpg);
181 struct tcm_vhost_tport *tport = tpg->tport;
182
183 switch (tport->tport_proto_id) {
184 case SCSI_PROTOCOL_SAS:
185 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
186 format_code);
187 case SCSI_PROTOCOL_FCP:
188 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
189 format_code);
190 case SCSI_PROTOCOL_ISCSI:
191 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
192 format_code);
193 default:
194 pr_err("Unknown tport_proto_id: 0x%02x, using"
195 " SAS emulation\n", tport->tport_proto_id);
196 break;
197 }
198
199 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
200 format_code);
201}
202
203static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
204 const char *buf,
205 u32 *out_tid_len,
206 char **port_nexus_ptr)
207{
208 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
209 struct tcm_vhost_tpg, se_tpg);
210 struct tcm_vhost_tport *tport = tpg->tport;
211
212 switch (tport->tport_proto_id) {
213 case SCSI_PROTOCOL_SAS:
214 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
215 port_nexus_ptr);
216 case SCSI_PROTOCOL_FCP:
217 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
218 port_nexus_ptr);
219 case SCSI_PROTOCOL_ISCSI:
220 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
221 port_nexus_ptr);
222 default:
223 pr_err("Unknown tport_proto_id: 0x%02x, using"
224 " SAS emulation\n", tport->tport_proto_id);
225 break;
226 }
227
228 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
229 port_nexus_ptr);
230}
231
232static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
233 struct se_portal_group *se_tpg)
234{
235 struct tcm_vhost_nacl *nacl;
236
237 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
238 if (!nacl) {
239 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
240 return NULL;
241 }
242
243 return &nacl->se_node_acl;
244}
245
246static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
247 struct se_node_acl *se_nacl)
248{
249 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
250 struct tcm_vhost_nacl, se_node_acl);
251 kfree(nacl);
252}
253
254static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
255{
256 return 1;
257}
258
259static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
260{
261 return;
262}
263
264static int tcm_vhost_shutdown_session(struct se_session *se_sess)
265{
266 return 0;
267}
268
269static void tcm_vhost_close_session(struct se_session *se_sess)
270{
271 return;
272}
273
274static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
275{
276 return 0;
277}
278
279static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
280{
281 /* Go ahead and process the write immediately */
282 target_execute_cmd(se_cmd);
283 return 0;
284}
285
286static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
287{
288 return 0;
289}
290
291static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
292{
293 return;
294}
295
296static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
297{
298 return 0;
299}
300
301static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
302{
303 return 0;
304}
305
306static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
307{
308 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
309
310 llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
311
312 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
313}
314
315static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
316{
317 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
318 struct tcm_vhost_cmd, tvc_se_cmd);
319 vhost_scsi_complete_cmd(tv_cmd);
320 return 0;
321}
322
323static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
324{
325 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
326 struct tcm_vhost_cmd, tvc_se_cmd);
327 vhost_scsi_complete_cmd(tv_cmd);
328 return 0;
329}
330
331static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
332{
333 return 0;
334}
335
336static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
337{
338 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
339
340 /* TODO locking against target/backend threads? */
341 transport_generic_free_cmd(se_cmd, 1);
342
343 if (tv_cmd->tvc_sgl_count) {
344 u32 i;
345 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
346 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
347
348 kfree(tv_cmd->tvc_sgl);
349 }
350
351 kfree(tv_cmd);
352}
353
354/* Fill in status and signal that we are done processing this command
355 *
356 * This is scheduled in the vhost work queue so we are called with the owner
357 * process mm and can access the vring.
358 */
359static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
360{
361 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
362 vs_completion_work);
363 struct virtio_scsi_cmd_resp v_rsp;
364 struct tcm_vhost_cmd *tv_cmd;
365 struct llist_node *llnode;
366 struct se_cmd *se_cmd;
367 int ret;
368
369 llnode = llist_del_all(&vs->vs_completion_list);
370 while (llnode) {
371 tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
372 tvc_completion_list);
373 llnode = llist_next(llnode);
374 se_cmd = &tv_cmd->tvc_se_cmd;
375
376 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
377 tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
378
379 memset(&v_rsp, 0, sizeof(v_rsp));
380 v_rsp.resid = se_cmd->residual_count;
381 /* TODO is status_qualifier field needed? */
382 v_rsp.status = se_cmd->scsi_status;
383 v_rsp.sense_len = se_cmd->scsi_sense_length;
384 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
385 v_rsp.sense_len);
386 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
387 if (likely(ret == 0))
388 vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
389 else
390 pr_err("Faulted on virtio_scsi_cmd_resp\n");
391
392 vhost_scsi_free_cmd(tv_cmd);
393 }
394
395 vhost_signal(&vs->dev, &vs->vqs[2]);
396}
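/*
 * Completion path overview (descriptive): tcm_vhost_queue_data_in() and
 * tcm_vhost_queue_status() both call vhost_scsi_complete_cmd(), which
 * pushes the command onto the lock-free vs_completion_list and queues
 * vs_completion_work; vhost_scsi_complete_cmd_work() then drains that
 * list with llist_del_all() and posts each response to the guest.
 */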
397
398static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
399 struct tcm_vhost_tpg *tv_tpg,
400 struct virtio_scsi_cmd_req *v_req,
401 u32 exp_data_len,
402 int data_direction)
403{
404 struct tcm_vhost_cmd *tv_cmd;
405 struct tcm_vhost_nexus *tv_nexus;
406
407 tv_nexus = tv_tpg->tpg_nexus;
408 if (!tv_nexus) {
409 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
410 return ERR_PTR(-EIO);
411 }
412
413 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
414 if (!tv_cmd) {
415 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
416 return ERR_PTR(-ENOMEM);
417 }
418 tv_cmd->tvc_tag = v_req->tag;
419 tv_cmd->tvc_task_attr = v_req->task_attr;
420 tv_cmd->tvc_exp_data_len = exp_data_len;
421 tv_cmd->tvc_data_direction = data_direction;
422 tv_cmd->tvc_nexus = tv_nexus;
423
424 return tv_cmd;
425}
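/*
 * The command returned here is only partially initialized; the caller
 * (vhost_scsi_handle_vq) still fills in tvc_vhost, tvc_resp, tvc_cdb,
 * tvc_lun and tvc_vq_desc before queueing the submission work.
 */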
426
427/*
428 * Map a user memory range into a scatterlist
429 *
430 * Returns the number of scatterlist entries used or -errno on error.
431 */
432static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
433 unsigned int sgl_count, void __user *ptr, size_t len, int write)
434{
435 struct scatterlist *sg = sgl;
436 unsigned int npages = 0;
437 int ret;
438
439 while (len > 0) {
440 struct page *page;
441 unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
442 unsigned int nbytes = min_t(unsigned int,
443 PAGE_SIZE - offset, len);
444
445 if (npages == sgl_count) {
446 ret = -ENOBUFS;
447 goto err;
448 }
449
450 ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
451 BUG_ON(ret == 0); /* we should either get our page or fail */
452 if (ret < 0)
453 goto err;
454
455 sg_set_page(sg, page, nbytes, offset);
456 ptr += nbytes;
457 len -= nbytes;
458 sg++;
459 npages++;
460 }
461 return npages;
462
463err:
464 /* Put pages that we hold */
465 for (sg = sgl; sg != &sgl[npages]; sg++)
466 put_page(sg_page(sg));
467 return ret;
468}
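/*
 * Usage sketch (illustrative only, not a call site in this file): a user
 * buffer that may span up to three pages needs three scatterlist
 * entries, roughly
 *
 *	struct scatterlist sgl[3];
 *
 *	sg_init_table(sgl, 3);
 *	ret = vhost_scsi_map_to_sgl(sgl, 3, ptr, len, write);
 *
 * On failure the pages pinned so far have already been released, so the
 * caller only needs to discard its scatterlist.
 */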
469
470static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
471 struct iovec *iov, unsigned int niov, int write)
472{
473 int ret;
474 unsigned int i;
475 u32 sgl_count;
476 struct scatterlist *sg;
477
478 /*
479 * Find out how long sglist needs to be
480 */
481 sgl_count = 0;
482 for (i = 0; i < niov; i++) {
483 sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
484 PAGE_SIZE - 1) >> PAGE_SHIFT) -
485 ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
486 }
487 /* TODO overflow checking */
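	/*
	 * Note: the per-iovec page count computed above is exactly what
	 * iov_num_pages() at the top of this file returns, so the sizing
	 * loop could presumably be written as (illustrative sketch):
	 *
	 *	for (i = 0; i < niov; i++)
	 *		sgl_count += iov_num_pages(&iov[i]);
	 */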
488
489 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
490 if (!sg)
491 return -ENOMEM;
492 pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
493 sg, sgl_count, !sg);
494 sg_init_table(sg, sgl_count);
495
496 tv_cmd->tvc_sgl = sg;
497 tv_cmd->tvc_sgl_count = sgl_count;
498
499 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
500 for (i = 0; i < niov; i++) {
501 ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
502 iov[i].iov_len, write);
503 if (ret < 0) {
504 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
505 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
506 kfree(tv_cmd->tvc_sgl);
507 tv_cmd->tvc_sgl = NULL;
508 tv_cmd->tvc_sgl_count = 0;
509 return ret;
510 }
511
512 sg += ret;
513 sgl_count -= ret;
514 }
515 return 0;
516}
517
518static void tcm_vhost_submission_work(struct work_struct *work)
519{
520 struct tcm_vhost_cmd *tv_cmd =
521 container_of(work, struct tcm_vhost_cmd, work);
522 struct tcm_vhost_nexus *tv_nexus;
523 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
524 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
525 int rc, sg_no_bidi = 0;
526
527 if (tv_cmd->tvc_sgl_count) {
528 sg_ptr = tv_cmd->tvc_sgl;
529/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
530#if 0
531 if (se_cmd->se_cmd_flags & SCF_BIDI) {
532 sg_bidi_ptr = NULL;
533 sg_no_bidi = 0;
534 }
535#endif
536 } else {
537 sg_ptr = NULL;
538 }
539 tv_nexus = tv_cmd->tvc_nexus;
540
541 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
542 tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
543 tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
544 tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
545 0, sg_ptr, tv_cmd->tvc_sgl_count,
546 sg_bidi_ptr, sg_no_bidi);
547 if (rc < 0) {
548 transport_send_check_condition_and_sense(se_cmd,
549 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
550 transport_generic_free_cmd(se_cmd, 0);
551 }
552}
553
554static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
555{
556 struct vhost_virtqueue *vq = &vs->vqs[2];
557 struct virtio_scsi_cmd_req v_req;
558 struct tcm_vhost_tpg *tv_tpg;
559 struct tcm_vhost_cmd *tv_cmd;
560 u32 exp_data_len, data_first, data_num, data_direction;
561 unsigned out, in, i;
562 int head, ret;
563
564 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
565 tv_tpg = vs->vs_tpg;
566 if (unlikely(!tv_tpg))
567 return;
568
569 mutex_lock(&vq->mutex);
570 vhost_disable_notify(&vs->dev, vq);
571
572 for (;;) {
573 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
574 ARRAY_SIZE(vq->iov), &out, &in,
575 NULL, NULL);
576 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
577 head, out, in);
578 /* On error, stop handling until the next kick. */
579 if (unlikely(head < 0))
580 break;
581 /* Nothing new? Wait for eventfd to tell us they refilled. */
582 if (head == vq->num) {
583 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
584 vhost_disable_notify(&vs->dev, vq);
585 continue;
586 }
587 break;
588 }
589
590/* FIXME: BIDI operation */
591 if (out == 1 && in == 1) {
592 data_direction = DMA_NONE;
593 data_first = 0;
594 data_num = 0;
595 } else if (out == 1 && in > 1) {
596 data_direction = DMA_FROM_DEVICE;
597 data_first = out + 1;
598 data_num = in - 1;
599 } else if (out > 1 && in == 1) {
600 data_direction = DMA_TO_DEVICE;
601 data_first = 1;
602 data_num = out - 1;
603 } else {
604 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
605 out, in);
606 break;
607 }
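		/*
		 * Descriptor layout recap (descriptive): the "out" descriptors
		 * carry the virtio_scsi_cmd_req, optionally followed by
		 * data-out, while the "in" descriptors carry the
		 * virtio_scsi_cmd_resp, optionally followed by data-in; the
		 * out/in counts checked above are what classifies a request as
		 * DMA_NONE, DMA_TO_DEVICE or DMA_FROM_DEVICE.
		 */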
608
609 /*
610 * Check for a sane resp buffer so we can report errors to
611 * the guest.
612 */
613 if (unlikely(vq->iov[out].iov_len !=
614 sizeof(struct virtio_scsi_cmd_resp))) {
615 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
616 " bytes\n", vq->iov[out].iov_len);
617 break;
618 }
619
620 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
621 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
622 " bytes\n", vq->iov[0].iov_len);
623 break;
624 }
625 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
626 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
627 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
628 sizeof(v_req));
629 if (unlikely(ret)) {
630 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
631 break;
632 }
633
634 exp_data_len = 0;
635 for (i = 0; i < data_num; i++)
636 exp_data_len += vq->iov[data_first + i].iov_len;
637
638 tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
639 exp_data_len, data_direction);
640 if (IS_ERR(tv_cmd)) {
641 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
642 PTR_ERR(tv_cmd));
643 break;
644 }
645 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
646 ": %d\n", tv_cmd, exp_data_len, data_direction);
647
648 tv_cmd->tvc_vhost = vs;
649
650 if (unlikely(vq->iov[out].iov_len !=
651 sizeof(struct virtio_scsi_cmd_resp))) {
652 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
653 " bytes, out: %d, in: %d\n",
654 vq->iov[out].iov_len, out, in);
655 break;
656 }
657
658 tv_cmd->tvc_resp = vq->iov[out].iov_base;
659
660 /*
661 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
662 * that will be used by tcm_vhost_new_cmd_map() and down into
663 * target_setup_cmd_from_cdb()
664 */
665 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
666 /*
667 * Check that the received CDB size does not exceed our
668 * hardcoded max for tcm_vhost
669 */
670 /* TODO what if cdb was too small for varlen cdb header? */
671 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
672 TCM_VHOST_MAX_CDB_SIZE)) {
673 vq_err(vq, "Received SCSI CDB with command_size: %d that"
674 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
675 scsi_command_size(tv_cmd->tvc_cdb),
676 TCM_VHOST_MAX_CDB_SIZE);
677 break; /* TODO */
678 }
679 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
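		/*
		 * virtio-scsi encodes the LUN as lun[0] = 1, lun[1] = target,
		 * and lun[2..3] = the single-level LUN in flat format with bit
		 * 14 set; the 0x3FFF mask above strips that flat-format flag
		 * (assumption based on the virtio-scsi spec, not on anything
		 * else in this file).
		 */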
680
681 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
682 tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
683
684 if (data_direction != DMA_NONE) {
685 ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
686 &vq->iov[data_first], data_num,
687 data_direction == DMA_TO_DEVICE);
688 if (unlikely(ret)) {
689 vq_err(vq, "Failed to map iov to sgl\n");
690 break; /* TODO */
691 }
692 }
693
694 /*
695 * Save the descriptor from vhost_get_vq_desc() to be used to
696 * complete the virtio-scsi request in TCM callback context via
697 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
698 */
699 tv_cmd->tvc_vq_desc = head;
700 /*
701 * Dispatch tv_cmd descriptor for cmwq execution in process
702 * context provided by tcm_vhost_workqueue. This also ensures
703 * tv_cmd is executed on the same kworker CPU as this vhost
704 * thread to gain positive L2 cache locality effects..
705 */
706 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
707 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
708 }
709
710 mutex_unlock(&vq->mutex);
711}
712
713static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
714{
715 pr_debug("%s: The handling func for control queue.\n", __func__);
716}
717
718static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
719{
720 pr_debug("%s: The handling func for event queue.\n", __func__);
721}
722
723static void vhost_scsi_handle_kick(struct vhost_work *work)
724{
725 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
726 poll.work);
727 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
728
729 vhost_scsi_handle_vq(vs);
730}
731
732/*
733 * Called from vhost_scsi_ioctl() context to walk the list of available
734 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
735 */
736static int vhost_scsi_set_endpoint(
737 struct vhost_scsi *vs,
738 struct vhost_scsi_target *t)
739{
740 struct tcm_vhost_tport *tv_tport;
741 struct tcm_vhost_tpg *tv_tpg;
742 int index;
743
744 mutex_lock(&vs->dev.mutex);
745 /* Verify that ring has been setup correctly. */
746 for (index = 0; index < vs->dev.nvqs; ++index) {
747 /* Verify that ring has been setup correctly. */
748 if (!vhost_vq_access_ok(&vs->vqs[index])) {
749 mutex_unlock(&vs->dev.mutex);
750 return -EFAULT;
751 }
752 }
753 mutex_unlock(&vs->dev.mutex);
754
755 mutex_lock(&tcm_vhost_mutex);
756 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
757 mutex_lock(&tv_tpg->tv_tpg_mutex);
758 if (!tv_tpg->tpg_nexus) {
759 mutex_unlock(&tv_tpg->tv_tpg_mutex);
760 continue;
761 }
762 if (tv_tpg->tv_tpg_vhost_count != 0) {
763 mutex_unlock(&tv_tpg->tv_tpg_mutex);
764 continue;
765 }
766 tv_tport = tv_tpg->tport;
767
768 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
769 (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
770 tv_tpg->tv_tpg_vhost_count++;
771 mutex_unlock(&tv_tpg->tv_tpg_mutex);
772 mutex_unlock(&tcm_vhost_mutex);
773
774 mutex_lock(&vs->dev.mutex);
775 if (vs->vs_tpg) {
776 mutex_unlock(&vs->dev.mutex);
777 mutex_lock(&tv_tpg->tv_tpg_mutex);
778 tv_tpg->tv_tpg_vhost_count--;
779 mutex_unlock(&tv_tpg->tv_tpg_mutex);
780 return -EEXIST;
781 }
782
783 vs->vs_tpg = tv_tpg;
784 smp_mb__after_atomic_inc();
785 mutex_unlock(&vs->dev.mutex);
786 return 0;
787 }
788 mutex_unlock(&tv_tpg->tv_tpg_mutex);
789 }
790 mutex_unlock(&tcm_vhost_mutex);
791 return -EINVAL;
792}
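/*
 * Userspace side, for orientation (illustrative sketch only; the WWPN
 * value below is made up): a VMM binds a configured target roughly as
 *
 *	struct vhost_scsi_target t = { 0 };
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	snprintf(t.vhost_wwpn, sizeof(t.vhost_wwpn), "naa.600140554cf3a18e");
 *	t.vhost_tpgt = 1;
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 */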
793
794static int vhost_scsi_clear_endpoint(
795 struct vhost_scsi *vs,
796 struct vhost_scsi_target *t)
797{
798 struct tcm_vhost_tport *tv_tport;
799 struct tcm_vhost_tpg *tv_tpg;
800 int index, ret;
801
802 mutex_lock(&vs->dev.mutex);
803 /* Verify that ring has been setup correctly. */
804 for (index = 0; index < vs->dev.nvqs; ++index) {
805 if (!vhost_vq_access_ok(&vs->vqs[index])) {
806 ret = -EFAULT;
807 goto err;
808 }
809 }
810
811 if (!vs->vs_tpg) {
812 ret = -ENODEV;
813 goto err;
814 }
815 tv_tpg = vs->vs_tpg;
816 tv_tport = tv_tpg->tport;
817
818 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
819 (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
820 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
821 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
822 tv_tport->tport_name, tv_tpg->tport_tpgt,
823 t->vhost_wwpn, t->vhost_tpgt);
824 ret = -EINVAL;
825 goto err;
826 }
827 tv_tpg->tv_tpg_vhost_count--;
828 vs->vs_tpg = NULL;
829 mutex_unlock(&vs->dev.mutex);
830
831 return 0;
832
833err:
834 mutex_unlock(&vs->dev.mutex);
835 return ret;
836}
837
838static int vhost_scsi_open(struct inode *inode, struct file *f)
839{
840 struct vhost_scsi *s;
841 int r;
842
843 s = kzalloc(sizeof(*s), GFP_KERNEL);
844 if (!s)
845 return -ENOMEM;
846
847 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
848
849 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
850 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
851 s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
852 r = vhost_dev_init(&s->dev, s->vqs, 3);
853 if (r < 0) {
854 kfree(s);
855 return r;
856 }
857
858 f->private_data = s;
859 return 0;
860}
861
862static int vhost_scsi_release(struct inode *inode, struct file *f)
863{
864 struct vhost_scsi *s = f->private_data;
865
866 if (s->vs_tpg && s->vs_tpg->tport) {
867 struct vhost_scsi_target backend;
868
869 memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
870 sizeof(backend.vhost_wwpn));
871 backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
872 vhost_scsi_clear_endpoint(s, &backend);
873 }
874
875 vhost_dev_stop(&s->dev);
876 vhost_dev_cleanup(&s->dev, false);
877 kfree(s);
878 return 0;
879}
880
881static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
882{
883 vhost_poll_flush(&vs->dev.vqs[index].poll);
884}
885
886static void vhost_scsi_flush(struct vhost_scsi *vs)
887{
888 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
889 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
890 vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
891}
892
893static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
894{
895 if (features & ~VHOST_FEATURES)
896 return -EOPNOTSUPP;
897
898 mutex_lock(&vs->dev.mutex);
899 if ((features & (1 << VHOST_F_LOG_ALL)) &&
900 !vhost_log_access_ok(&vs->dev)) {
901 mutex_unlock(&vs->dev.mutex);
902 return -EFAULT;
903 }
904 vs->dev.acked_features = features;
905 smp_wmb();
906 vhost_scsi_flush(vs);
907 mutex_unlock(&vs->dev.mutex);
908 return 0;
909}
910
911static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
912 unsigned long arg)
913{
914 struct vhost_scsi *vs = f->private_data;
915 struct vhost_scsi_target backend;
916 void __user *argp = (void __user *)arg;
917 u64 __user *featurep = argp;
918 u64 features;
919 int r, abi_version = VHOST_SCSI_ABI_VERSION;
920
921 switch (ioctl) {
922 case VHOST_SCSI_SET_ENDPOINT:
923 if (copy_from_user(&backend, argp, sizeof backend))
924 return -EFAULT;
925 if (backend.reserved != 0)
926 return -EOPNOTSUPP;
927
928 return vhost_scsi_set_endpoint(vs, &backend);
929 case VHOST_SCSI_CLEAR_ENDPOINT:
930 if (copy_from_user(&backend, argp, sizeof backend))
931 return -EFAULT;
932 if (backend.reserved != 0)
933 return -EOPNOTSUPP;
934
935 return vhost_scsi_clear_endpoint(vs, &backend);
936 case VHOST_SCSI_GET_ABI_VERSION:
937 if (copy_to_user(argp, &abi_version, sizeof abi_version))
938 return -EFAULT;
939 return 0;
940 case VHOST_GET_FEATURES:
941 features = VHOST_FEATURES;
942 if (copy_to_user(featurep, &features, sizeof features))
943 return -EFAULT;
944 return 0;
945 case VHOST_SET_FEATURES:
946 if (copy_from_user(&features, featurep, sizeof features))
947 return -EFAULT;
948 return vhost_scsi_set_features(vs, features);
949 default:
950 mutex_lock(&vs->dev.mutex);
951 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
952 /* TODO: flush backend after dev ioctl. */
953 if (r == -ENOIOCTLCMD)
954 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
955 mutex_unlock(&vs->dev.mutex);
956 return r;
957 }
958}
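/*
 * Anything not handled above (VHOST_SET_OWNER, VHOST_SET_MEM_TABLE, the
 * VHOST_SET_VRING_* family, ...) falls through to the generic
 * vhost_dev_ioctl()/vhost_vring_ioctl() helpers in the default branch.
 */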
959
960#ifdef CONFIG_COMPAT
961static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
962 unsigned long arg)
963{
964 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
965}
966#endif
967
968static const struct file_operations vhost_scsi_fops = {
969 .owner = THIS_MODULE,
970 .release = vhost_scsi_release,
971 .unlocked_ioctl = vhost_scsi_ioctl,
972#ifdef CONFIG_COMPAT
973 .compat_ioctl = vhost_scsi_compat_ioctl,
974#endif
975 .open = vhost_scsi_open,
976 .llseek = noop_llseek,
977};
978
979static struct miscdevice vhost_scsi_misc = {
980 MISC_DYNAMIC_MINOR,
981 "vhost-scsi",
982 &vhost_scsi_fops,
983};
984
985static int __init vhost_scsi_register(void)
986{
987 return misc_register(&vhost_scsi_misc);
988}
989
990static int vhost_scsi_deregister(void)
991{
992 return misc_deregister(&vhost_scsi_misc);
993}
994
995static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
996{
997 switch (tport->tport_proto_id) {
998 case SCSI_PROTOCOL_SAS:
999 return "SAS";
1000 case SCSI_PROTOCOL_FCP:
1001 return "FCP";
1002 case SCSI_PROTOCOL_ISCSI:
1003 return "iSCSI";
1004 default:
1005 break;
1006 }
1007
1008 return "Unknown";
1009}
1010
1011static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1012 struct se_lun *lun)
1013{
1014 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1015 struct tcm_vhost_tpg, se_tpg);
1016
1017 mutex_lock(&tv_tpg->tv_tpg_mutex);
1018 tv_tpg->tv_tpg_port_count++;
1019 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1020
1021 return 0;
1022}
1023
1024static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1025 struct se_lun *se_lun)
1026{
1027 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1028 struct tcm_vhost_tpg, se_tpg);
1029
1030 mutex_lock(&tv_tpg->tv_tpg_mutex);
1031 tv_tpg->tv_tpg_port_count--;
1032 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1033}
1034
1035static struct se_node_acl *tcm_vhost_make_nodeacl(
1036 struct se_portal_group *se_tpg,
1037 struct config_group *group,
1038 const char *name)
1039{
1040 struct se_node_acl *se_nacl, *se_nacl_new;
1041 struct tcm_vhost_nacl *nacl;
1042 u64 wwpn = 0;
1043 u32 nexus_depth;
1044
1045 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1046 return ERR_PTR(-EINVAL); */
1047 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1048 if (!se_nacl_new)
1049 return ERR_PTR(-ENOMEM);
1050
1051 nexus_depth = 1;
1052 /*
1053 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1054 * when converting a NodeACL from demo mode -> explicit
1055 */
1056 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1057 name, nexus_depth);
1058 if (IS_ERR(se_nacl)) {
1059 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1060 return se_nacl;
1061 }
1062 /*
1063 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1064 */
1065 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1066 nacl->iport_wwpn = wwpn;
1067
1068 return se_nacl;
1069}
1070
1071static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1072{
1073 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1074 struct tcm_vhost_nacl, se_node_acl);
1075 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1076 kfree(nacl);
1077}
1078
1079static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
1080 const char *name)
1081{
1082 struct se_portal_group *se_tpg;
1083 struct tcm_vhost_nexus *tv_nexus;
1084
1085 mutex_lock(&tv_tpg->tv_tpg_mutex);
1086 if (tv_tpg->tpg_nexus) {
1087 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1088 pr_debug("tv_tpg->tpg_nexus already exists\n");
1089 return -EEXIST;
1090 }
1091 se_tpg = &tv_tpg->se_tpg;
1092
1093 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1094 if (!tv_nexus) {
1095 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1096 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1097 return -ENOMEM;
1098 }
1099 /*
1100 * Initialize the struct se_session pointer
1101 */
1102 tv_nexus->tvn_se_sess = transport_init_session();
1103 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1104 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1105 kfree(tv_nexus);
1106 return -ENOMEM;
1107 }
1108 /*
1109 * Since we are running in 'demo mode' this call will generate a
1110 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1111 * the SCSI Initiator port name of the passed configfs group 'name'.
1112 */
1113 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1114 se_tpg, (unsigned char *)name);
1115 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1116 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1117 pr_debug("core_tpg_check_initiator_node_acl() failed"
1118 " for %s\n", name);
1119 transport_free_session(tv_nexus->tvn_se_sess);
1120 kfree(tv_nexus);
1121 return -ENOMEM;
1122 }
1123 /*
1124 * Now register the TCM vhost virtual I_T Nexus as active with the
1125 * call to __transport_register_session()
1126 */
1127 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1128 tv_nexus->tvn_se_sess, tv_nexus);
1129 tv_tpg->tpg_nexus = tv_nexus;
1130
1131 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1132 return 0;
1133}
1134
1135static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1136{
1137 struct se_session *se_sess;
1138 struct tcm_vhost_nexus *tv_nexus;
1139
1140 mutex_lock(&tpg->tv_tpg_mutex);
1141 tv_nexus = tpg->tpg_nexus;
1142 if (!tv_nexus) {
1143 mutex_unlock(&tpg->tv_tpg_mutex);
1144 return -ENODEV;
1145 }
1146
1147 se_sess = tv_nexus->tvn_se_sess;
1148 if (!se_sess) {
1149 mutex_unlock(&tpg->tv_tpg_mutex);
1150 return -ENODEV;
1151 }
1152
1153 if (tpg->tv_tpg_port_count != 0) {
1154 mutex_unlock(&tpg->tv_tpg_mutex);
1155 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1156 " active TPG port count: %d\n",
1157 tpg->tv_tpg_port_count);
1158 return -EBUSY;
1159 }
1160
1161 if (tpg->tv_tpg_vhost_count != 0) {
1162 mutex_unlock(&tpg->tv_tpg_mutex);
1163 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1164 " active TPG vhost count: %d\n",
1165 tpg->tv_tpg_vhost_count);
1166 return -EBUSY;
1167 }
1168
1169 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1170 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1171 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1172 /*
1173 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1174 */
1175 transport_deregister_session(tv_nexus->tvn_se_sess);
1176 tpg->tpg_nexus = NULL;
1177 mutex_unlock(&tpg->tv_tpg_mutex);
1178
1179 kfree(tv_nexus);
1180 return 0;
1181}
1182
1183static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1184 char *page)
1185{
1186 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1187 struct tcm_vhost_tpg, se_tpg);
1188 struct tcm_vhost_nexus *tv_nexus;
1189 ssize_t ret;
1190
1191 mutex_lock(&tv_tpg->tv_tpg_mutex);
1192 tv_nexus = tv_tpg->tpg_nexus;
1193 if (!tv_nexus) {
1194 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1195 return -ENODEV;
1196 }
1197 ret = snprintf(page, PAGE_SIZE, "%s\n",
1198 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1199 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1200
1201 return ret;
1202}
1203
1204static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1205 const char *page,
1206 size_t count)
1207{
1208 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1209 struct tcm_vhost_tpg, se_tpg);
1210 struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1211 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1212 int ret;
1213 /*
1214 * Shutdown the active I_T nexus if 'NULL' is passed..
1215 */
1216 if (!strncmp(page, "NULL", 4)) {
1217 ret = tcm_vhost_drop_nexus(tv_tpg);
1218 return (!ret) ? count : ret;
1219 }
1220 /*
1221 * Otherwise make sure the passed virtual Initiator port WWN matches
1222 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1223 * tcm_vhost_make_nexus().
1224 */
1225 if (strlen(page) >= TCM_VHOST_NAMELEN) {
1226 pr_err("Emulated NAA Sas Address: %s, exceeds"
1227 " max: %d\n", page, TCM_VHOST_NAMELEN);
1228 return -EINVAL;
1229 }
1230 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1231
1232 ptr = strstr(i_port, "naa.");
1233 if (ptr) {
1234 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1235 pr_err("Passed SAS Initiator Port %s does not"
1236 " match target port protoid: %s\n", i_port,
1237 tcm_vhost_dump_proto_id(tport_wwn));
1238 return -EINVAL;
1239 }
1240 port_ptr = &i_port[0];
1241 goto check_newline;
1242 }
1243 ptr = strstr(i_port, "fc.");
1244 if (ptr) {
1245 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1246 pr_err("Passed FCP Initiator Port %s does not"
1247 " match target port protoid: %s\n", i_port,
1248 tcm_vhost_dump_proto_id(tport_wwn));
1249 return -EINVAL;
1250 }
1251 port_ptr = &i_port[3]; /* Skip over "fc." */
1252 goto check_newline;
1253 }
1254 ptr = strstr(i_port, "iqn.");
1255 if (ptr) {
1256 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1257 pr_err("Passed iSCSI Initiator Port %s does not"
1258 " match target port protoid: %s\n", i_port,
1259 tcm_vhost_dump_proto_id(tport_wwn));
1260 return -EINVAL;
1261 }
1262 port_ptr = &i_port[0];
1263 goto check_newline;
1264 }
1265 pr_err("Unable to locate prefix for emulated Initiator Port:"
1266 " %s\n", i_port);
1267 return -EINVAL;
1268 /*
1269 * Clear any trailing newline for the NAA WWN
1270 */
1271check_newline:
1272 if (i_port[strlen(i_port)-1] == '\n')
1273 i_port[strlen(i_port)-1] = '\0';
1274
1275 ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1276 if (ret < 0)
1277 return ret;
1278
1279 return count;
1280}
1281
1282TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1283
1284static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1285 &tcm_vhost_tpg_nexus.attr,
1286 NULL,
1287};
1288
1289static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
1290 struct config_group *group,
1291 const char *name)
1292{
1293 struct tcm_vhost_tport *tport = container_of(wwn,
1294 struct tcm_vhost_tport, tport_wwn);
1295
1296 struct tcm_vhost_tpg *tpg;
1297 unsigned long tpgt;
1298 int ret;
1299
1300 if (strstr(name, "tpgt_") != name)
1301 return ERR_PTR(-EINVAL);
1302 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1303 return ERR_PTR(-EINVAL);
1304
1305 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1306 if (!tpg) {
1307 pr_err("Unable to allocate struct tcm_vhost_tpg");
1308 return ERR_PTR(-ENOMEM);
1309 }
1310 mutex_init(&tpg->tv_tpg_mutex);
1311 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1312 tpg->tport = tport;
1313 tpg->tport_tpgt = tpgt;
1314
1315 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1316 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1317 if (ret < 0) {
1318 kfree(tpg);
1319 return NULL;
1320 }
1321 mutex_lock(&tcm_vhost_mutex);
1322 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1323 mutex_unlock(&tcm_vhost_mutex);
1324
1325 return &tpg->se_tpg;
1326}
1327
1328static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1329{
1330 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1331 struct tcm_vhost_tpg, se_tpg);
1332
1333 mutex_lock(&tcm_vhost_mutex);
1334 list_del(&tpg->tv_tpg_list);
1335 mutex_unlock(&tcm_vhost_mutex);
1336 /*
1337 * Release the virtual I_T Nexus for this vhost TPG
1338 */
1339 tcm_vhost_drop_nexus(tpg);
1340 /*
1341 * Deregister the se_tpg from TCM..
1342 */
1343 core_tpg_deregister(se_tpg);
1344 kfree(tpg);
1345}
1346
1347static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
1348 struct config_group *group,
1349 const char *name)
1350{
1351 struct tcm_vhost_tport *tport;
1352 char *ptr;
1353 u64 wwpn = 0;
1354 int off = 0;
1355
1356 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1357 return ERR_PTR(-EINVAL); */
1358
1359 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1360 if (!tport) {
1361 pr_err("Unable to allocate struct tcm_vhost_tport");
1362 return ERR_PTR(-ENOMEM);
1363 }
1364 tport->tport_wwpn = wwpn;
1365 /*
1366 * Determine the emulated Protocol Identifier and Target Port Name
1367 * based on the incoming configfs directory name.
1368 */
1369 ptr = strstr(name, "naa.");
1370 if (ptr) {
1371 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1372 goto check_len;
1373 }
1374 ptr = strstr(name, "fc.");
1375 if (ptr) {
1376 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1377 off = 3; /* Skip over "fc." */
1378 goto check_len;
1379 }
1380 ptr = strstr(name, "iqn.");
1381 if (ptr) {
1382 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1383 goto check_len;
1384 }
1385
1386 pr_err("Unable to locate prefix for emulated Target Port:"
1387 " %s\n", name);
1388 kfree(tport);
1389 return ERR_PTR(-EINVAL);
1390
1391check_len:
1392 if (strlen(name) >= TCM_VHOST_NAMELEN) {
1393 pr_err("Emulated %s Address: %s, exceeds"
1394 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1395 TCM_VHOST_NAMELEN);
1396 kfree(tport);
1397 return ERR_PTR(-EINVAL);
1398 }
1399 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1400
1401 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1402 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1403
1404 return &tport->tport_wwn;
1405}
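/*
 * ConfigFS layout served by the make/drop callbacks in this file
 * (descriptive; the mount point assumed is the usual /sys/kernel/config):
 *
 *	/sys/kernel/config/target/vhost/<wwpn>/tpgt_<n>/nexus
 *
 * where <wwpn> must start with "naa.", "fc." or "iqn." so that
 * tcm_vhost_make_tport() and tcm_vhost_tpg_store_nexus() can derive the
 * protocol identifier, and <n> is parsed by tcm_vhost_make_tpg().
 */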
1406
1407static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1408{
1409 struct tcm_vhost_tport *tport = container_of(wwn,
1410 struct tcm_vhost_tport, tport_wwn);
1411
1412 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1413 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1414 tport->tport_name);
1415
1416 kfree(tport);
1417}
1418
1419static ssize_t tcm_vhost_wwn_show_attr_version(
1420 struct target_fabric_configfs *tf,
1421 char *page)
1422{
1423 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1424 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1425 utsname()->machine);
1426}
1427
1428TF_WWN_ATTR_RO(tcm_vhost, version);
1429
1430static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1431 &tcm_vhost_wwn_version.attr,
1432 NULL,
1433};
1434
1435static struct target_core_fabric_ops tcm_vhost_ops = {
1436 .get_fabric_name = tcm_vhost_get_fabric_name,
1437 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
1438 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
1439 .tpg_get_tag = tcm_vhost_get_tag,
1440 .tpg_get_default_depth = tcm_vhost_get_default_depth,
1441 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
1442 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
1443 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
1444 .tpg_check_demo_mode = tcm_vhost_check_true,
1445 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
1446 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
1447 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
1448 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
1449 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
1450 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
1451 .release_cmd = tcm_vhost_release_cmd,
1452 .shutdown_session = tcm_vhost_shutdown_session,
1453 .close_session = tcm_vhost_close_session,
1454 .sess_get_index = tcm_vhost_sess_get_index,
1455 .sess_get_initiator_sid = NULL,
1456 .write_pending = tcm_vhost_write_pending,
1457 .write_pending_status = tcm_vhost_write_pending_status,
1458 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
1459 .get_task_tag = tcm_vhost_get_task_tag,
1460 .get_cmd_state = tcm_vhost_get_cmd_state,
1461 .queue_data_in = tcm_vhost_queue_data_in,
1462 .queue_status = tcm_vhost_queue_status,
1463 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
1464 /*
1465 * Setup callers for generic logic in target_core_fabric_configfs.c
1466 */
1467 .fabric_make_wwn = tcm_vhost_make_tport,
1468 .fabric_drop_wwn = tcm_vhost_drop_tport,
1469 .fabric_make_tpg = tcm_vhost_make_tpg,
1470 .fabric_drop_tpg = tcm_vhost_drop_tpg,
1471 .fabric_post_link = tcm_vhost_port_link,
1472 .fabric_pre_unlink = tcm_vhost_port_unlink,
1473 .fabric_make_np = NULL,
1474 .fabric_drop_np = NULL,
1475 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
1476 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
1477};
1478
1479static int tcm_vhost_register_configfs(void)
1480{
1481 struct target_fabric_configfs *fabric;
1482 int ret;
1483
1484 pr_debug("TCM_VHOST fabric module %s on %s/%s"
1485 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1486 utsname()->machine);
1487 /*
1488 * Register the top level struct config_item_type with TCM core
1489 */
1490 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
1491 if (IS_ERR(fabric)) {
1492 pr_err("target_fabric_configfs_init() failed\n");
1493 return PTR_ERR(fabric);
1494 }
1495 /*
1496 * Setup fabric->tf_ops from our local tcm_vhost_ops
1497 */
1498 fabric->tf_ops = tcm_vhost_ops;
1499 /*
1500 * Setup default attribute lists for various fabric->tf_cit_tmpl
1501 */
1502 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
1503 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
1504 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1505 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1506 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1507 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1508 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1509 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1510 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1511 /*
1512 * Register the fabric for use within TCM
1513 */
1514 ret = target_fabric_configfs_register(fabric);
1515 if (ret < 0) {
1516 pr_err("target_fabric_configfs_register() failed"
1517 " for TCM_VHOST\n");
1518 return ret;
1519 }
1520 /*
1521 * Setup our local pointer to *fabric
1522 */
1523 tcm_vhost_fabric_configfs = fabric;
1524 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
1525 return 0;
1526};
1527
1528static void tcm_vhost_deregister_configfs(void)
1529{
1530 if (!tcm_vhost_fabric_configfs)
1531 return;
1532
1533 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
1534 tcm_vhost_fabric_configfs = NULL;
1535 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
1536};
1537
1538static int __init tcm_vhost_init(void)
1539{
1540 int ret = -ENOMEM;
1541 /*
1542 * Use our own dedicated workqueue for submitting I/O into
1543 * target core to avoid contention within system_wq.
1544 */
1545 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
1546 if (!tcm_vhost_workqueue)
1547 goto out;
1548
1549 ret = vhost_scsi_register();
1550 if (ret < 0)
1551 goto out_destroy_workqueue;
1552
1553 ret = tcm_vhost_register_configfs();
1554 if (ret < 0)
1555 goto out_vhost_scsi_deregister;
1556
1557 return 0;
1558
1559out_vhost_scsi_deregister:
1560 vhost_scsi_deregister();
1561out_destroy_workqueue:
1562 destroy_workqueue(tcm_vhost_workqueue);
1563out:
1564 return ret;
1565};
1566
1567static void tcm_vhost_exit(void)
1568{
1569 tcm_vhost_deregister_configfs();
1570 vhost_scsi_deregister();
1571 destroy_workqueue(tcm_vhost_workqueue);
1572};
1573
1574MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
1575MODULE_LICENSE("GPL");
1576module_init(tcm_vhost_init);
1577module_exit(tcm_vhost_exit);