1 /*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
4 * (C) Copyright 2010-2013 Datera, Inc.
5 * (C) Copyright 2010-2012 IBM Corp.
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 ****************************************************************************/
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
55 #define TCM_VHOST_VERSION "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_PAGES 2048
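/*
 * These preallocation limits size the per-command resources set up once per
 * I_T nexus in tcm_vhost_make_nexus(): each of the TCM_VHOST_DEFAULT_TAGS
 * session tags gets TCM_VHOST_PREALLOC_SGLS scatterlist entries and
 * TCM_VHOST_PREALLOC_PAGES page pointers, so vhost_scsi_get_tag() can reuse
 * them without allocating in the I/O submission path.
 */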
62 struct vhost_scsi_inflight {
63 /* Wait for the flush operation to finish */
64 struct completion comp;
65 /* Refcount for the inflight reqs */
69 struct tcm_vhost_cmd {
70 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
72 /* virtio-scsi initiator task attribute */
74 /* virtio-scsi initiator data direction */
75 enum dma_data_direction tvc_data_direction;
76 /* Expected data transfer length from virtio-scsi header */
78 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
80 /* The number of scatterlists associated with this cmd */
82 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
84 /* Pointer to the SGL formatted memory from virtio-scsi */
85 struct scatterlist *tvc_sgl;
86 struct page **tvc_upages;
87 /* Pointer to response */
88 struct virtio_scsi_cmd_resp __user *tvc_resp;
89 /* Pointer to vhost_scsi for our device */
90 struct vhost_scsi *tvc_vhost;
91 /* Pointer to vhost_virtqueue for the cmd */
92 struct vhost_virtqueue *tvc_vq;
93 /* Pointer to vhost nexus memory */
94 struct tcm_vhost_nexus *tvc_nexus;
95 /* The TCM I/O descriptor that is accessed via container_of() */
96 struct se_cmd tvc_se_cmd;
97 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
98 struct work_struct work;
99 /* Copy of the incoming SCSI command descriptor block (CDB) */
100 unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
101 /* Sense buffer that will be mapped into outgoing status */
102 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
103 /* Completed commands list, serviced from vhost worker thread */
104 struct llist_node tvc_completion_list;
105 /* Used to track inflight cmd */
106 struct vhost_scsi_inflight *inflight;
109 struct tcm_vhost_nexus {
110 /* Pointer to TCM session for I_T Nexus */
111 struct se_session *tvn_se_sess;
114 struct tcm_vhost_nacl {
115 /* Binary World Wide unique Port Name for Vhost Initiator port */
117 /* ASCII formatted WWPN for SAS Initiator port */
118 char iport_name[TCM_VHOST_NAMELEN];
119 /* Returned by tcm_vhost_make_nodeacl() */
120 struct se_node_acl se_node_acl;
123 struct tcm_vhost_tpg {
124 /* Vhost port target portal group tag for TCM */
126 /* Used to track the number of TPG Port/LUN links with respect to explicit I_T Nexus shutdown */
127 int tv_tpg_port_count;
128 /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
129 int tv_tpg_vhost_count;
130 /* list for tcm_vhost_list */
131 struct list_head tv_tpg_list;
132 /* Used to protect access for tpg_nexus */
133 struct mutex tv_tpg_mutex;
134 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
135 struct tcm_vhost_nexus *tpg_nexus;
136 /* Pointer back to tcm_vhost_tport */
137 struct tcm_vhost_tport *tport;
138 /* Returned by tcm_vhost_make_tpg() */
139 struct se_portal_group se_tpg;
140 /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
141 struct vhost_scsi *vhost_scsi;
144 struct tcm_vhost_tport {
145 /* SCSI protocol the tport is providing */
147 /* Binary World Wide unique Port Name for Vhost Target port */
149 /* ASCII formatted WWPN for Vhost Target port */
150 char tport_name[TCM_VHOST_NAMELEN];
151 /* Returned by tcm_vhost_make_tport() */
152 struct se_wwn tport_wwn;
155 struct tcm_vhost_evt {
156 /* event to be sent to guest */
157 struct virtio_scsi_event event;
158 /* event list, serviced from vhost worker thread */
159 struct llist_node list;
163 VHOST_SCSI_VQ_CTL = 0,
164 VHOST_SCSI_VQ_EVT = 1,
165 VHOST_SCSI_VQ_IO = 2,
169 VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
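/*
 * Feature bits offered to the guest: the generic VHOST_FEATURES plus
 * VIRTIO_SCSI_F_HOTPLUG, which gates whether tcm_vhost_do_plug() is allowed
 * to send VIRTIO_SCSI_T_TRANSPORT_RESET events when a LUN is linked or
 * unlinked.
 */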
172 #define VHOST_SCSI_MAX_TARGET 256
173 #define VHOST_SCSI_MAX_VQ 128
174 #define VHOST_SCSI_MAX_EVENT 128
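/*
 * VHOST_SCSI_MAX_TARGET bounds the vs_tpg[] lookup table indexed by the
 * target byte of the virtio-scsi LUN, VHOST_SCSI_MAX_VQ bounds vs->vqs[],
 * and VHOST_SCSI_MAX_EVENT caps vs_events_nr before further hotplug events
 * are dropped and signalled via VIRTIO_SCSI_T_EVENTS_MISSED.
 */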
176 struct vhost_scsi_virtqueue {
177 struct vhost_virtqueue vq;
179 * Reference counting for inflight reqs, used for the flush operation. At
180 * any time one counter tracks newly submitted commands, while we
181 * wait for the other one to reach 0.
183 struct vhost_scsi_inflight inflights[2];
185 * Indicate current inflight in use, protected by vq->mutex.
186 * Writers must also take dev mutex and flush under it.
192 /* Protected by vhost_scsi->dev.mutex */
193 struct tcm_vhost_tpg **vs_tpg;
194 char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
196 struct vhost_dev dev;
197 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
199 struct vhost_work vs_completion_work; /* cmd completion work item */
200 struct llist_head vs_completion_list; /* cmd completion queue */
202 struct vhost_work vs_event_work; /* evt injection work item */
203 struct llist_head vs_event_list; /* evt injection queue */
205 bool vs_events_missed; /* any missed events, protected by vq->mutex */
206 int vs_events_nr; /* num of pending events, protected by vq->mutex */
209 /* Local pointer to allocated TCM configfs fabric module */
210 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
212 static struct workqueue_struct *tcm_vhost_workqueue;
214 /* Global mutex to protect the tcm_vhost TPG list for vhost ioctl access */
215 static DEFINE_MUTEX(tcm_vhost_mutex);
216 static LIST_HEAD(tcm_vhost_list);
218 static int iov_num_pages(struct iovec *iov)
220 return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
221 ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
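/*
 * Worked example (assuming 4 KiB pages): iov_base = 0x1ff8, iov_len = 16
 * ends at 0x2008, so PAGE_ALIGN() rounds the end up to 0x3000 while the
 * base rounds down to 0x1000, giving (0x3000 - 0x1000) >> PAGE_SHIFT = 2.
 */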
224 static void tcm_vhost_done_inflight(struct kref *kref)
226 struct vhost_scsi_inflight *inflight;
228 inflight = container_of(kref, struct vhost_scsi_inflight, kref);
229 complete(&inflight->comp);
232 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
233 struct vhost_scsi_inflight *old_inflight[])
235 struct vhost_scsi_inflight *new_inflight;
236 struct vhost_virtqueue *vq;
239 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
242 mutex_lock(&vq->mutex);
244 /* store the old inflight */
245 idx = vs->vqs[i].inflight_idx;
247 old_inflight[i] = &vs->vqs[i].inflights[idx];
249 /* set up the new inflight */
250 vs->vqs[i].inflight_idx = idx ^ 1;
251 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
252 kref_init(&new_inflight->kref);
253 init_completion(&new_inflight->comp);
255 mutex_unlock(&vq->mutex);
259 static struct vhost_scsi_inflight *
260 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
262 struct vhost_scsi_inflight *inflight;
263 struct vhost_scsi_virtqueue *svq;
265 svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
266 inflight = &svq->inflights[svq->inflight_idx];
267 kref_get(&inflight->kref);
272 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
274 kref_put(&inflight->kref, tcm_vhost_done_inflight);
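/*
 * Summary of the inflight tracking scheme: each virtqueue keeps two
 * vhost_scsi_inflight counters and inflight_idx selects the active one.
 * A command takes a reference on the active counter when it is set up
 * (tcm_vhost_get_inflight()) and drops it when it is released
 * (tcm_vhost_put_inflight()). vhost_scsi_flush() flips inflight_idx so new
 * commands land on the fresh counter, drops the initial reference on the
 * old one, and waits for its completion to fire once the last outstanding
 * request has finished.
 */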
277 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
282 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
287 static char *tcm_vhost_get_fabric_name(void)
292 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
294 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
295 struct tcm_vhost_tpg, se_tpg);
296 struct tcm_vhost_tport *tport = tpg->tport;
298 switch (tport->tport_proto_id) {
299 case SCSI_PROTOCOL_SAS:
300 return sas_get_fabric_proto_ident(se_tpg);
301 case SCSI_PROTOCOL_FCP:
302 return fc_get_fabric_proto_ident(se_tpg);
303 case SCSI_PROTOCOL_ISCSI:
304 return iscsi_get_fabric_proto_ident(se_tpg);
306 pr_err("Unknown tport_proto_id: 0x%02x, using"
307 " SAS emulation\n", tport->tport_proto_id);
311 return sas_get_fabric_proto_ident(se_tpg);
314 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
316 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
317 struct tcm_vhost_tpg, se_tpg);
318 struct tcm_vhost_tport *tport = tpg->tport;
320 return &tport->tport_name[0];
323 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
325 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
326 struct tcm_vhost_tpg, se_tpg);
327 return tpg->tport_tpgt;
330 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
336 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
337 struct se_node_acl *se_nacl,
338 struct t10_pr_registration *pr_reg,
342 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
343 struct tcm_vhost_tpg, se_tpg);
344 struct tcm_vhost_tport *tport = tpg->tport;
346 switch (tport->tport_proto_id) {
347 case SCSI_PROTOCOL_SAS:
348 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
350 case SCSI_PROTOCOL_FCP:
351 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
353 case SCSI_PROTOCOL_ISCSI:
354 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
357 pr_err("Unknown tport_proto_id: 0x%02x, using"
358 " SAS emulation\n", tport->tport_proto_id);
362 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
367 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
368 struct se_node_acl *se_nacl,
369 struct t10_pr_registration *pr_reg,
372 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
373 struct tcm_vhost_tpg, se_tpg);
374 struct tcm_vhost_tport *tport = tpg->tport;
376 switch (tport->tport_proto_id) {
377 case SCSI_PROTOCOL_SAS:
378 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
380 case SCSI_PROTOCOL_FCP:
381 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
383 case SCSI_PROTOCOL_ISCSI:
384 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
387 pr_err("Unknown tport_proto_id: 0x%02x, using"
388 " SAS emulation\n", tport->tport_proto_id);
392 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
397 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
400 char **port_nexus_ptr)
402 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
403 struct tcm_vhost_tpg, se_tpg);
404 struct tcm_vhost_tport *tport = tpg->tport;
406 switch (tport->tport_proto_id) {
407 case SCSI_PROTOCOL_SAS:
408 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
410 case SCSI_PROTOCOL_FCP:
411 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
413 case SCSI_PROTOCOL_ISCSI:
414 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
417 pr_err("Unknown tport_proto_id: 0x%02x, using"
418 " SAS emulation\n", tport->tport_proto_id);
422 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
426 static struct se_node_acl *
427 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
429 struct tcm_vhost_nacl *nacl;
431 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
433 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
437 return &nacl->se_node_acl;
441 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
442 struct se_node_acl *se_nacl)
444 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
445 struct tcm_vhost_nacl, se_node_acl);
449 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
454 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
456 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
457 struct tcm_vhost_cmd, tvc_se_cmd);
458 struct se_session *se_sess = se_cmd->se_sess;
460 if (tv_cmd->tvc_sgl_count) {
462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
466 tcm_vhost_put_inflight(tv_cmd->inflight);
467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
470 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
475 static void tcm_vhost_close_session(struct se_session *se_sess)
480 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
485 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
487 /* Go ahead and process the write immediately */
488 target_execute_cmd(se_cmd);
492 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
497 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
502 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
507 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
512 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
514 struct vhost_scsi *vs = cmd->tvc_vhost;
516 llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
518 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
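/*
 * Called from the TCM completion callbacks below (tcm_vhost_queue_data_in()
 * and tcm_vhost_queue_status()); writing the response is deferred to
 * vhost_scsi_complete_cmd_work(), which runs from the vhost worker and can
 * therefore access the vring with the owner process mm.
 */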
521 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
523 struct tcm_vhost_cmd *cmd = container_of(se_cmd,
524 struct tcm_vhost_cmd, tvc_se_cmd);
525 vhost_scsi_complete_cmd(cmd);
529 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
531 struct tcm_vhost_cmd *cmd = container_of(se_cmd,
532 struct tcm_vhost_cmd, tvc_se_cmd);
533 vhost_scsi_complete_cmd(cmd);
537 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
542 static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
547 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
553 static struct tcm_vhost_evt *
554 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
555 u32 event, u32 reason)
557 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
558 struct tcm_vhost_evt *evt;
560 if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
561 vs->vs_events_missed = true;
565 evt = kzalloc(sizeof(*evt), GFP_KERNEL);
567 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
568 vs->vs_events_missed = true;
572 evt->event.event = event;
573 evt->event.reason = reason;
579 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
581 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
583 /* TODO locking against target/backend threads? */
584 transport_generic_free_cmd(se_cmd, 0);
588 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
590 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
594 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
596 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
597 struct virtio_scsi_event *event = &evt->event;
598 struct virtio_scsi_event __user *eventp;
602 if (!vq->private_data) {
603 vs->vs_events_missed = true;
608 vhost_disable_notify(&vs->dev, vq);
609 head = vhost_get_vq_desc(vq, vq->iov,
610 ARRAY_SIZE(vq->iov), &out, &in,
613 vs->vs_events_missed = true;
616 if (head == vq->num) {
617 if (vhost_enable_notify(&vs->dev, vq))
619 vs->vs_events_missed = true;
623 if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
624 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
625 vq->iov[out].iov_len);
626 vs->vs_events_missed = true;
630 if (vs->vs_events_missed) {
631 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
632 vs->vs_events_missed = false;
635 eventp = vq->iov[out].iov_base;
636 ret = __copy_to_user(eventp, event, sizeof(*event));
638 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
640 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
643 static void tcm_vhost_evt_work(struct vhost_work *work)
645 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
647 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
648 struct tcm_vhost_evt *evt;
649 struct llist_node *llnode;
651 mutex_lock(&vq->mutex);
652 llnode = llist_del_all(&vs->vs_event_list);
654 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
655 llnode = llist_next(llnode);
656 tcm_vhost_do_evt_work(vs, evt);
657 tcm_vhost_free_evt(vs, evt);
659 mutex_unlock(&vq->mutex);
662 /* Fill in status and signal that we are done processing this command
664 * This is scheduled in the vhost work queue so we are called with the owner
665 * process mm and can access the vring.
667 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
669 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
671 DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
672 struct virtio_scsi_cmd_resp v_rsp;
673 struct tcm_vhost_cmd *cmd;
674 struct llist_node *llnode;
675 struct se_cmd *se_cmd;
678 bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
679 llnode = llist_del_all(&vs->vs_completion_list);
681 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
682 tvc_completion_list);
683 llnode = llist_next(llnode);
684 se_cmd = &cmd->tvc_se_cmd;
686 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
687 cmd, se_cmd->residual_count, se_cmd->scsi_status);
689 memset(&v_rsp, 0, sizeof(v_rsp));
690 v_rsp.resid = se_cmd->residual_count;
691 /* TODO is status_qualifier field needed? */
692 v_rsp.status = se_cmd->scsi_status;
693 v_rsp.sense_len = se_cmd->scsi_sense_length;
694 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
696 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
697 if (likely(ret == 0)) {
698 struct vhost_scsi_virtqueue *q;
699 vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
700 q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
702 __set_bit(vq, signal);
704 pr_err("Faulted on virtio_scsi_cmd_resp\n");
706 vhost_scsi_free_cmd(cmd);
710 while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
712 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
715 static struct tcm_vhost_cmd *
716 vhost_scsi_get_tag(struct vhost_virtqueue *vq,
717 struct tcm_vhost_tpg *tpg,
718 struct virtio_scsi_cmd_req *v_req,
722 struct tcm_vhost_cmd *cmd;
723 struct tcm_vhost_nexus *tv_nexus;
724 struct se_session *se_sess;
725 struct scatterlist *sg;
729 tv_nexus = tpg->tpg_nexus;
731 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
732 return ERR_PTR(-EIO);
734 se_sess = tv_nexus->tvn_se_sess;
736 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
738 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
739 return ERR_PTR(-ENOMEM);
742 cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
744 pages = cmd->tvc_upages;
745 memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
748 cmd->tvc_upages = pages;
749 cmd->tvc_se_cmd.map_tag = tag;
750 cmd->tvc_tag = v_req->tag;
751 cmd->tvc_task_attr = v_req->task_attr;
752 cmd->tvc_exp_data_len = exp_data_len;
753 cmd->tvc_data_direction = data_direction;
754 cmd->tvc_nexus = tv_nexus;
755 cmd->inflight = tcm_vhost_get_inflight(vq);
761 * Map a user memory range into a scatterlist
763 * Returns the number of scatterlist entries used or -errno on error.
766 vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
767 struct scatterlist *sgl,
768 unsigned int sgl_count,
772 unsigned int npages = 0, pages_nr, offset, nbytes;
773 struct scatterlist *sg = sgl;
774 void __user *ptr = iov->iov_base;
775 size_t len = iov->iov_len;
779 if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
780 pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
781 " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
782 sgl_count, TCM_VHOST_PREALLOC_SGLS);
786 pages_nr = iov_num_pages(iov);
787 if (pages_nr > sgl_count)
790 if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
791 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
792 " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
793 pages_nr, TCM_VHOST_PREALLOC_PAGES);
797 pages = tv_cmd->tvc_upages;
799 ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
800 /* No pages were pinned */
803 /* Fewer pages pinned than requested */
804 if (ret != pages_nr) {
805 for (i = 0; i < ret; i++)
812 offset = (uintptr_t)ptr & ~PAGE_MASK;
813 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
814 sg_set_page(sg, pages[npages], nbytes, offset);
826 vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
834 struct scatterlist *sg;
837 * Find out how long the sglist needs to be
840 for (i = 0; i < niov; i++)
841 sgl_count += iov_num_pages(&iov[i]);
843 /* TODO overflow checking */
846 pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
847 sg_init_table(sg, sgl_count);
849 cmd->tvc_sgl_count = sgl_count;
851 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
852 for (i = 0; i < niov; i++) {
853 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
856 for (i = 0; i < cmd->tvc_sgl_count; i++)
857 put_page(sg_page(&cmd->tvc_sgl[i]));
859 cmd->tvc_sgl_count = 0;
869 static void tcm_vhost_submission_work(struct work_struct *work)
871 struct tcm_vhost_cmd *cmd =
872 container_of(work, struct tcm_vhost_cmd, work);
873 struct tcm_vhost_nexus *tv_nexus;
874 struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
875 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
876 int rc, sg_no_bidi = 0;
878 if (cmd->tvc_sgl_count) {
879 sg_ptr = cmd->tvc_sgl;
880 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
882 if (se_cmd->se_cmd_flags & SCF_BIDI) {
890 tv_nexus = cmd->tvc_nexus;
892 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
893 cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
894 cmd->tvc_lun, cmd->tvc_exp_data_len,
895 cmd->tvc_task_attr, cmd->tvc_data_direction,
896 TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
897 sg_bidi_ptr, sg_no_bidi, NULL, 0);
899 transport_send_check_condition_and_sense(se_cmd,
900 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
901 transport_generic_free_cmd(se_cmd, 0);
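/*
 * Note that TARGET_SCF_ACK_KREF in the submission above pairs with
 * vhost_scsi_check_stop_free(), where the extra se_cmd reference taken by
 * target core is dropped again via target_put_sess_cmd().
 */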
906 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
907 struct vhost_virtqueue *vq,
908 int head, unsigned out)
910 struct virtio_scsi_cmd_resp __user *resp;
911 struct virtio_scsi_cmd_resp rsp;
914 memset(&rsp, 0, sizeof(rsp));
915 rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
916 resp = vq->iov[out].iov_base;
917 ret = __copy_to_user(resp, &rsp, sizeof(rsp));
919 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
921 pr_err("Faulted on virtio_scsi_cmd_resp\n");
925 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
927 struct tcm_vhost_tpg **vs_tpg;
928 struct virtio_scsi_cmd_req v_req;
929 struct tcm_vhost_tpg *tpg;
930 struct tcm_vhost_cmd *cmd;
931 u32 exp_data_len, data_first, data_num, data_direction;
936 mutex_lock(&vq->mutex);
938 * We can handle the vq only after the endpoint has been set up by the
939 * VHOST_SCSI_SET_ENDPOINT ioctl.
941 vs_tpg = vq->private_data;
945 vhost_disable_notify(&vs->dev, vq);
948 head = vhost_get_vq_desc(vq, vq->iov,
949 ARRAY_SIZE(vq->iov), &out, &in,
951 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
953 /* On error, stop handling until the next kick. */
954 if (unlikely(head < 0))
956 /* Nothing new? Wait for eventfd to tell us they refilled. */
957 if (head == vq->num) {
958 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
959 vhost_disable_notify(&vs->dev, vq);
965 /* FIXME: BIDI operation */
966 if (out == 1 && in == 1) {
967 data_direction = DMA_NONE;
970 } else if (out == 1 && in > 1) {
971 data_direction = DMA_FROM_DEVICE;
972 data_first = out + 1;
974 } else if (out > 1 && in == 1) {
975 data_direction = DMA_TO_DEVICE;
979 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
985 * Check for a sane response buffer so we can report errors to the guest.
988 if (unlikely(vq->iov[out].iov_len !=
989 sizeof(struct virtio_scsi_cmd_resp))) {
990 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
991 " bytes\n", vq->iov[out].iov_len);
995 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
996 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
997 " bytes\n", vq->iov[0].iov_len);
1000 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
1001 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
1002 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
1004 if (unlikely(ret)) {
1005 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1009 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1010 if (unlikely(v_req.lun[0] != 1)) {
1011 vhost_scsi_send_bad_target(vs, vq, head, out);
1015 /* Extract the tpgt */
1016 target = v_req.lun[1];
1017 tpg = ACCESS_ONCE(vs_tpg[target]);
1019 /* Target does not exist, fail the request */
1020 if (unlikely(!tpg)) {
1021 vhost_scsi_send_bad_target(vs, vq, head, out);
1026 for (i = 0; i < data_num; i++)
1027 exp_data_len += vq->iov[data_first + i].iov_len;
1029 cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
1030 exp_data_len, data_direction);
1032 vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1036 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1037 ": %d\n", cmd, exp_data_len, data_direction);
1039 cmd->tvc_vhost = vs;
1041 cmd->tvc_resp = vq->iov[out].iov_base;
1044 * Copy the received CDB descriptor into cmd->tvc_cdb
1045 * that will be used by tcm_vhost_new_cmd_map() and down into
1046 * target_setup_cmd_from_cdb()
1048 memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1050 * Check that the received CDB size does not exceed our
1051 * hardcoded max for tcm_vhost
1053 /* TODO what if cdb was too small for varlen cdb header? */
1054 if (unlikely(scsi_command_size(cmd->tvc_cdb) >
1055 TCM_VHOST_MAX_CDB_SIZE)) {
1056 vq_err(vq, "Received SCSI CDB with command_size: %d that"
1057 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1058 scsi_command_size(cmd->tvc_cdb),
1059 TCM_VHOST_MAX_CDB_SIZE);
1062 cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1064 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1065 cmd->tvc_cdb[0], cmd->tvc_lun);
1067 if (data_direction != DMA_NONE) {
1068 ret = vhost_scsi_map_iov_to_sgl(cmd,
1069 &vq->iov[data_first], data_num,
1070 data_direction == DMA_FROM_DEVICE);
1071 if (unlikely(ret)) {
1072 vq_err(vq, "Failed to map iov to sgl\n");
1078 * Save the descriptor from vhost_get_vq_desc() to be used to
1079 * complete the virtio-scsi request in TCM callback context via
1080 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1082 cmd->tvc_vq_desc = head;
1084 * Dispatch tv_cmd descriptor for cmwq execution in process
1085 * context provided by tcm_vhost_workqueue. This also ensures
1086 * tv_cmd is executed on the same kworker CPU as this vhost
1087 * thread to gain positive L2 cache locality effects.
1089 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1090 queue_work(tcm_vhost_workqueue, &cmd->work);
1093 mutex_unlock(&vq->mutex);
1097 vhost_scsi_free_cmd(cmd);
1099 vhost_scsi_send_bad_target(vs, vq, head, out);
1101 mutex_unlock(&vq->mutex);
1104 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1106 pr_debug("%s: The handling func for control queue.\n", __func__);
1110 tcm_vhost_send_evt(struct vhost_scsi *vs,
1111 struct tcm_vhost_tpg *tpg,
1116 struct tcm_vhost_evt *evt;
1118 evt = tcm_vhost_allocate_evt(vs, event, reason);
1123 /* TODO: share lun setup code with virtio-scsi.ko */
1125 * Note: evt->event is zeroed when we allocate it and
1126 * lun[4-7] need to be zero according to virtio-scsi spec.
1128 evt->event.lun[0] = 0x01;
1129 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1130 if (lun->unpacked_lun >= 256)
1131 evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1132 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
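/*
 * The lun[] bytes assembled above follow the virtio-scsi LUN addressing
 * format: byte 0 is 1, byte 1 is the target (tpgt), and bytes 2-3 carry
 * the LUN with 0x40 set in byte 2 for LUNs >= 256. vhost_scsi_handle_vq()
 * mirrors this when it reads the target from v_req.lun[1] and the LUN
 * from lun[2]/lun[3].
 */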
1135 llist_add(&evt->list, &vs->vs_event_list);
1136 vhost_work_queue(&vs->dev, &vs->vs_event_work);
1139 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1141 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1143 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1145 mutex_lock(&vq->mutex);
1146 if (!vq->private_data)
1149 if (vs->vs_events_missed)
1150 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1152 mutex_unlock(&vq->mutex);
1155 static void vhost_scsi_handle_kick(struct vhost_work *work)
1157 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1159 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1161 vhost_scsi_handle_vq(vs, vq);
1164 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1166 vhost_poll_flush(&vs->vqs[index].vq.poll);
1169 /* Callers must hold dev mutex */
1170 static void vhost_scsi_flush(struct vhost_scsi *vs)
1172 struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1175 /* Init new inflight and remember the old inflight */
1176 tcm_vhost_init_inflight(vs, old_inflight);
1179 * The inflight->kref was initialized to 1. We decrement it here to
1180 * indicate the start of the flush operation so that it will reach 0
1181 * when all the reqs are finished.
1183 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1184 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1186 /* Flush both the vhost poll and vhost work */
1187 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1188 vhost_scsi_flush_vq(vs, i);
1189 vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1190 vhost_work_flush(&vs->dev, &vs->vs_event_work);
1192 /* Wait for all reqs issued before the flush to be finished */
1193 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1194 wait_for_completion(&old_inflight[i]->comp);
1198 * Called from vhost_scsi_ioctl() context to walk the list of available
1199 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1201 * The lock nesting rule is:
1202 * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1205 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1206 struct vhost_scsi_target *t)
1208 struct tcm_vhost_tport *tv_tport;
1209 struct tcm_vhost_tpg *tpg;
1210 struct tcm_vhost_tpg **vs_tpg;
1211 struct vhost_virtqueue *vq;
1212 int index, ret, i, len;
1215 mutex_lock(&tcm_vhost_mutex);
1216 mutex_lock(&vs->dev.mutex);
1218 /* Verify that the rings have been set up correctly. */
1219 for (index = 0; index < vs->dev.nvqs; ++index) {
1221 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1227 len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1228 vs_tpg = kzalloc(len, GFP_KERNEL);
1234 memcpy(vs_tpg, vs->vs_tpg, len);
1236 list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1237 mutex_lock(&tpg->tv_tpg_mutex);
1238 if (!tpg->tpg_nexus) {
1239 mutex_unlock(&tpg->tv_tpg_mutex);
1242 if (tpg->tv_tpg_vhost_count != 0) {
1243 mutex_unlock(&tpg->tv_tpg_mutex);
1246 tv_tport = tpg->tport;
1248 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1249 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1251 mutex_unlock(&tpg->tv_tpg_mutex);
1255 tpg->tv_tpg_vhost_count++;
1256 tpg->vhost_scsi = vs;
1257 vs_tpg[tpg->tport_tpgt] = tpg;
1258 smp_mb__after_atomic();
1261 mutex_unlock(&tpg->tv_tpg_mutex);
1265 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1266 sizeof(vs->vs_vhost_wwpn));
1267 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1268 vq = &vs->vqs[i].vq;
1269 mutex_lock(&vq->mutex);
1270 vq->private_data = vs_tpg;
1271 vhost_init_used(vq);
1272 mutex_unlock(&vq->mutex);
1280 * Act as synchronize_rcu to make sure access to
1281 * old vs->vs_tpg is finished.
1283 vhost_scsi_flush(vs);
1285 vs->vs_tpg = vs_tpg;
1288 mutex_unlock(&vs->dev.mutex);
1289 mutex_unlock(&tcm_vhost_mutex);
1294 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1295 struct vhost_scsi_target *t)
1297 struct tcm_vhost_tport *tv_tport;
1298 struct tcm_vhost_tpg *tpg;
1299 struct vhost_virtqueue *vq;
1304 mutex_lock(&tcm_vhost_mutex);
1305 mutex_lock(&vs->dev.mutex);
1306 /* Verify that the rings have been set up correctly. */
1307 for (index = 0; index < vs->dev.nvqs; ++index) {
1308 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1319 for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1321 tpg = vs->vs_tpg[target];
1325 mutex_lock(&tpg->tv_tpg_mutex);
1326 tv_tport = tpg->tport;
1332 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1333 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1334 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1335 tv_tport->tport_name, tpg->tport_tpgt,
1336 t->vhost_wwpn, t->vhost_tpgt);
1340 tpg->tv_tpg_vhost_count--;
1341 tpg->vhost_scsi = NULL;
1342 vs->vs_tpg[target] = NULL;
1344 mutex_unlock(&tpg->tv_tpg_mutex);
1347 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1348 vq = &vs->vqs[i].vq;
1349 mutex_lock(&vq->mutex);
1350 vq->private_data = NULL;
1351 mutex_unlock(&vq->mutex);
1355 * Act as synchronize_rcu to make sure access to
1356 * old vs->vs_tpg is finished.
1358 vhost_scsi_flush(vs);
1361 WARN_ON(vs->vs_events_nr);
1362 mutex_unlock(&vs->dev.mutex);
1363 mutex_unlock(&tcm_vhost_mutex);
1367 mutex_unlock(&tpg->tv_tpg_mutex);
1369 mutex_unlock(&vs->dev.mutex);
1370 mutex_unlock(&tcm_vhost_mutex);
1374 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1376 struct vhost_virtqueue *vq;
1379 if (features & ~VHOST_SCSI_FEATURES)
1382 mutex_lock(&vs->dev.mutex);
1383 if ((features & (1 << VHOST_F_LOG_ALL)) &&
1384 !vhost_log_access_ok(&vs->dev)) {
1385 mutex_unlock(&vs->dev.mutex);
1389 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1390 vq = &vs->vqs[i].vq;
1391 mutex_lock(&vq->mutex);
1392 vq->acked_features = features;
1393 mutex_unlock(&vq->mutex);
1395 mutex_unlock(&vs->dev.mutex);
1399 static void vhost_scsi_free(struct vhost_scsi *vs)
1401 if (is_vmalloc_addr(vs))
1407 static int vhost_scsi_open(struct inode *inode, struct file *f)
1409 struct vhost_scsi *vs;
1410 struct vhost_virtqueue **vqs;
1413 vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1415 vs = vzalloc(sizeof(*vs));
1420 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1424 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1425 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1427 vs->vs_events_nr = 0;
1428 vs->vs_events_missed = false;
1430 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1431 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1432 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1433 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1434 for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1435 vqs[i] = &vs->vqs[i].vq;
1436 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1438 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1440 tcm_vhost_init_inflight(vs, NULL);
1442 f->private_data = vs;
1446 vhost_scsi_free(vs);
1451 static int vhost_scsi_release(struct inode *inode, struct file *f)
1453 struct vhost_scsi *vs = f->private_data;
1454 struct vhost_scsi_target t;
1456 mutex_lock(&vs->dev.mutex);
1457 memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1458 mutex_unlock(&vs->dev.mutex);
1459 vhost_scsi_clear_endpoint(vs, &t);
1460 vhost_dev_stop(&vs->dev);
1461 vhost_dev_cleanup(&vs->dev, false);
1462 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1463 vhost_scsi_flush(vs);
1465 vhost_scsi_free(vs);
1470 vhost_scsi_ioctl(struct file *f,
1474 struct vhost_scsi *vs = f->private_data;
1475 struct vhost_scsi_target backend;
1476 void __user *argp = (void __user *)arg;
1477 u64 __user *featurep = argp;
1478 u32 __user *eventsp = argp;
1481 int r, abi_version = VHOST_SCSI_ABI_VERSION;
1482 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1485 case VHOST_SCSI_SET_ENDPOINT:
1486 if (copy_from_user(&backend, argp, sizeof backend))
1488 if (backend.reserved != 0)
1491 return vhost_scsi_set_endpoint(vs, &backend);
1492 case VHOST_SCSI_CLEAR_ENDPOINT:
1493 if (copy_from_user(&backend, argp, sizeof backend))
1495 if (backend.reserved != 0)
1498 return vhost_scsi_clear_endpoint(vs, &backend);
1499 case VHOST_SCSI_GET_ABI_VERSION:
1500 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1503 case VHOST_SCSI_SET_EVENTS_MISSED:
1504 if (get_user(events_missed, eventsp))
1506 mutex_lock(&vq->mutex);
1507 vs->vs_events_missed = events_missed;
1508 mutex_unlock(&vq->mutex);
1510 case VHOST_SCSI_GET_EVENTS_MISSED:
1511 mutex_lock(&vq->mutex);
1512 events_missed = vs->vs_events_missed;
1513 mutex_unlock(&vq->mutex);
1514 if (put_user(events_missed, eventsp))
1517 case VHOST_GET_FEATURES:
1518 features = VHOST_SCSI_FEATURES;
1519 if (copy_to_user(featurep, &features, sizeof features))
1522 case VHOST_SET_FEATURES:
1523 if (copy_from_user(&features, featurep, sizeof features))
1525 return vhost_scsi_set_features(vs, features);
1527 mutex_lock(&vs->dev.mutex);
1528 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1529 /* TODO: flush backend after dev ioctl. */
1530 if (r == -ENOIOCTLCMD)
1531 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1532 mutex_unlock(&vs->dev.mutex);
1537 #ifdef CONFIG_COMPAT
1538 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1541 return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1545 static const struct file_operations vhost_scsi_fops = {
1546 .owner = THIS_MODULE,
1547 .release = vhost_scsi_release,
1548 .unlocked_ioctl = vhost_scsi_ioctl,
1549 #ifdef CONFIG_COMPAT
1550 .compat_ioctl = vhost_scsi_compat_ioctl,
1552 .open = vhost_scsi_open,
1553 .llseek = noop_llseek,
1556 static struct miscdevice vhost_scsi_misc = {
1562 static int __init vhost_scsi_register(void)
1564 return misc_register(&vhost_scsi_misc);
1567 static int vhost_scsi_deregister(void)
1569 return misc_deregister(&vhost_scsi_misc);
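/*
 * Illustrative userspace usage (not part of the driver): a VMM opens the
 * misc character device registered above (typically /dev/vhost-scsi),
 * performs the generic vhost setup that the default vhost_scsi_ioctl()
 * branch forwards to vhost_dev_ioctl()/vhost_vring_ioctl(), and then binds
 * a configfs-created target with VHOST_SCSI_SET_ENDPOINT, e.g.:
 *
 *	struct vhost_scsi_target t = { 0 };
 *	strncpy(t.vhost_wwpn, "naa.6001405123456789", sizeof(t.vhost_wwpn));
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &t);
 *
 * The device path, descriptor name and WWPN here are examples only.
 */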
1572 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1574 switch (tport->tport_proto_id) {
1575 case SCSI_PROTOCOL_SAS:
1577 case SCSI_PROTOCOL_FCP:
1579 case SCSI_PROTOCOL_ISCSI:
1589 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1590 struct se_lun *lun, bool plug)
1593 struct vhost_scsi *vs = tpg->vhost_scsi;
1594 struct vhost_virtqueue *vq;
1600 mutex_lock(&vs->dev.mutex);
1603 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1605 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1607 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1608 mutex_lock(&vq->mutex);
1609 if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1610 tcm_vhost_send_evt(vs, tpg, lun,
1611 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1612 mutex_unlock(&vq->mutex);
1613 mutex_unlock(&vs->dev.mutex);
1616 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1618 tcm_vhost_do_plug(tpg, lun, true);
1621 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1623 tcm_vhost_do_plug(tpg, lun, false);
1626 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1629 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1630 struct tcm_vhost_tpg, se_tpg);
1632 mutex_lock(&tcm_vhost_mutex);
1634 mutex_lock(&tpg->tv_tpg_mutex);
1635 tpg->tv_tpg_port_count++;
1636 mutex_unlock(&tpg->tv_tpg_mutex);
1638 tcm_vhost_hotplug(tpg, lun);
1640 mutex_unlock(&tcm_vhost_mutex);
1645 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1648 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1649 struct tcm_vhost_tpg, se_tpg);
1651 mutex_lock(&tcm_vhost_mutex);
1653 mutex_lock(&tpg->tv_tpg_mutex);
1654 tpg->tv_tpg_port_count--;
1655 mutex_unlock(&tpg->tv_tpg_mutex);
1657 tcm_vhost_hotunplug(tpg, lun);
1659 mutex_unlock(&tcm_vhost_mutex);
1662 static struct se_node_acl *
1663 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1664 struct config_group *group,
1667 struct se_node_acl *se_nacl, *se_nacl_new;
1668 struct tcm_vhost_nacl *nacl;
1672 /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1673 return ERR_PTR(-EINVAL); */
1674 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1676 return ERR_PTR(-ENOMEM);
1680 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1681 * when converting a NodeACL from demo mode -> explicit
1683 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1685 if (IS_ERR(se_nacl)) {
1686 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1690 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1692 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1693 nacl->iport_wwpn = wwpn;
1698 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1700 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1701 struct tcm_vhost_nacl, se_node_acl);
1702 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1706 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1707 struct se_session *se_sess)
1709 struct tcm_vhost_cmd *tv_cmd;
1712 if (!se_sess->sess_cmd_map)
1715 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1716 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1718 kfree(tv_cmd->tvc_sgl);
1719 kfree(tv_cmd->tvc_upages);
1723 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1726 struct se_portal_group *se_tpg;
1727 struct se_session *se_sess;
1728 struct tcm_vhost_nexus *tv_nexus;
1729 struct tcm_vhost_cmd *tv_cmd;
1732 mutex_lock(&tpg->tv_tpg_mutex);
1733 if (tpg->tpg_nexus) {
1734 mutex_unlock(&tpg->tv_tpg_mutex);
1735 pr_debug("tpg->tpg_nexus already exists\n");
1738 se_tpg = &tpg->se_tpg;
1740 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1742 mutex_unlock(&tpg->tv_tpg_mutex);
1743 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1747 * Initialize the struct se_session pointer and setup tagpool
1748 * for struct tcm_vhost_cmd descriptors
1750 tv_nexus->tvn_se_sess = transport_init_session_tags(
1751 TCM_VHOST_DEFAULT_TAGS,
1752 sizeof(struct tcm_vhost_cmd),
1753 TARGET_PROT_NORMAL);
1754 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1755 mutex_unlock(&tpg->tv_tpg_mutex);
1759 se_sess = tv_nexus->tvn_se_sess;
1760 for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1761 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1763 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1764 TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1765 if (!tv_cmd->tvc_sgl) {
1766 mutex_unlock(&tpg->tv_tpg_mutex);
1767 pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1771 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1772 TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
1773 if (!tv_cmd->tvc_upages) {
1774 mutex_unlock(&tpg->tv_tpg_mutex);
1775 pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1780 * Since we are running in 'demo mode' this call will generate a
1781 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1782 * the SCSI Initiator port name of the passed configfs group 'name'.
1784 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1785 se_tpg, (unsigned char *)name);
1786 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1787 mutex_unlock(&tpg->tv_tpg_mutex);
1788 pr_debug("core_tpg_check_initiator_node_acl() failed"
1793 * Now register the TCM vhost virtual I_T Nexus as active with the
1794 * call to __transport_register_session()
1796 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1797 tv_nexus->tvn_se_sess, tv_nexus);
1798 tpg->tpg_nexus = tv_nexus;
1800 mutex_unlock(&tpg->tv_tpg_mutex);
1804 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1805 transport_free_session(se_sess);
1810 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1812 struct se_session *se_sess;
1813 struct tcm_vhost_nexus *tv_nexus;
1815 mutex_lock(&tpg->tv_tpg_mutex);
1816 tv_nexus = tpg->tpg_nexus;
1818 mutex_unlock(&tpg->tv_tpg_mutex);
1822 se_sess = tv_nexus->tvn_se_sess;
1824 mutex_unlock(&tpg->tv_tpg_mutex);
1828 if (tpg->tv_tpg_port_count != 0) {
1829 mutex_unlock(&tpg->tv_tpg_mutex);
1830 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1831 " active TPG port count: %d\n",
1832 tpg->tv_tpg_port_count);
1836 if (tpg->tv_tpg_vhost_count != 0) {
1837 mutex_unlock(&tpg->tv_tpg_mutex);
1838 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1839 " active TPG vhost count: %d\n",
1840 tpg->tv_tpg_vhost_count);
1844 pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1845 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1846 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1848 tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1850 * Release the SCSI I_T Nexus to the emulated vhost Target Port
1852 transport_deregister_session(tv_nexus->tvn_se_sess);
1853 tpg->tpg_nexus = NULL;
1854 mutex_unlock(&tpg->tv_tpg_mutex);
1860 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1863 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1864 struct tcm_vhost_tpg, se_tpg);
1865 struct tcm_vhost_nexus *tv_nexus;
1868 mutex_lock(&tpg->tv_tpg_mutex);
1869 tv_nexus = tpg->tpg_nexus;
1871 mutex_unlock(&tpg->tv_tpg_mutex);
1874 ret = snprintf(page, PAGE_SIZE, "%s\n",
1875 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1876 mutex_unlock(&tpg->tv_tpg_mutex);
1881 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1885 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1886 struct tcm_vhost_tpg, se_tpg);
1887 struct tcm_vhost_tport *tport_wwn = tpg->tport;
1888 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1891 * Shut down the active I_T nexus if 'NULL' is passed.
1893 if (!strncmp(page, "NULL", 4)) {
1894 ret = tcm_vhost_drop_nexus(tpg);
1895 return (!ret) ? count : ret;
1898 * Otherwise make sure the passed virtual Initiator port WWN matches
1899 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1900 * tcm_vhost_make_nexus().
1902 if (strlen(page) >= TCM_VHOST_NAMELEN) {
1903 pr_err("Emulated NAA Sas Address: %s, exceeds"
1904 " max: %d\n", page, TCM_VHOST_NAMELEN);
1907 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1909 ptr = strstr(i_port, "naa.");
1911 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1912 pr_err("Passed SAS Initiator Port %s does not"
1913 " match target port protoid: %s\n", i_port,
1914 tcm_vhost_dump_proto_id(tport_wwn));
1917 port_ptr = &i_port[0];
1920 ptr = strstr(i_port, "fc.");
1922 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1923 pr_err("Passed FCP Initiator Port %s does not"
1924 " match target port protoid: %s\n", i_port,
1925 tcm_vhost_dump_proto_id(tport_wwn));
1928 port_ptr = &i_port[3]; /* Skip over "fc." */
1931 ptr = strstr(i_port, "iqn.");
1933 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1934 pr_err("Passed iSCSI Initiator Port %s does not"
1935 " match target port protoid: %s\n", i_port,
1936 tcm_vhost_dump_proto_id(tport_wwn));
1939 port_ptr = &i_port[0];
1942 pr_err("Unable to locate prefix for emulated Initiator Port:"
1946 * Clear any trailing newline for the NAA WWN
1949 if (i_port[strlen(i_port)-1] == '\n')
1950 i_port[strlen(i_port)-1] = '\0';
1952 ret = tcm_vhost_make_nexus(tpg, port_ptr);
1959 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
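/*
 * Illustrative configfs usage (not part of the driver): the nexus attribute
 * defined above is typically driven from userspace with something like
 *
 *	echo -n naa.6001405aabbccdd0 > \
 *		/sys/kernel/config/target/vhost/naa.6001405123456789/tpgt_1/nexus
 *
 * where the written initiator WWN must carry a "naa.", "fc." or "iqn."
 * prefix matching the tport protocol, and writing "NULL" tears down the
 * active nexus. Paths and WWNs are examples only.
 */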
1961 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1962 &tcm_vhost_tpg_nexus.attr,
1966 static struct se_portal_group *
1967 tcm_vhost_make_tpg(struct se_wwn *wwn,
1968 struct config_group *group,
1971 struct tcm_vhost_tport *tport = container_of(wwn,
1972 struct tcm_vhost_tport, tport_wwn);
1974 struct tcm_vhost_tpg *tpg;
1978 if (strstr(name, "tpgt_") != name)
1979 return ERR_PTR(-EINVAL);
1980 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1981 return ERR_PTR(-EINVAL);
1983 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1985 pr_err("Unable to allocate struct tcm_vhost_tpg");
1986 return ERR_PTR(-ENOMEM);
1988 mutex_init(&tpg->tv_tpg_mutex);
1989 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1991 tpg->tport_tpgt = tpgt;
1993 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1994 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1999 mutex_lock(&tcm_vhost_mutex);
2000 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
2001 mutex_unlock(&tcm_vhost_mutex);
2003 return &tpg->se_tpg;
2006 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
2008 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
2009 struct tcm_vhost_tpg, se_tpg);
2011 mutex_lock(&tcm_vhost_mutex);
2012 list_del(&tpg->tv_tpg_list);
2013 mutex_unlock(&tcm_vhost_mutex);
2015 * Release the virtual I_T Nexus for this vhost TPG
2017 tcm_vhost_drop_nexus(tpg);
2019 * Deregister the se_tpg from TCM..
2021 core_tpg_deregister(se_tpg);
2025 static struct se_wwn *
2026 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2027 struct config_group *group,
2030 struct tcm_vhost_tport *tport;
2035 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2036 return ERR_PTR(-EINVAL); */
2038 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2040 pr_err("Unable to allocate struct tcm_vhost_tport");
2041 return ERR_PTR(-ENOMEM);
2043 tport->tport_wwpn = wwpn;
2045 * Determine the emulated Protocol Identifier and Target Port Name
2046 * based on the incoming configfs directory name.
2048 ptr = strstr(name, "naa.");
2050 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2053 ptr = strstr(name, "fc.");
2055 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2056 off = 3; /* Skip over "fc." */
2059 ptr = strstr(name, "iqn.");
2061 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2065 pr_err("Unable to locate prefix for emulated Target Port:"
2068 return ERR_PTR(-EINVAL);
2071 if (strlen(name) >= TCM_VHOST_NAMELEN) {
2072 pr_err("Emulated %s Address: %s, exceeds"
2073 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
2076 return ERR_PTR(-EINVAL);
2078 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2080 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2081 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2083 return &tport->tport_wwn;
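/*
 * Illustrative configfs flow (not part of the driver): creating
 *
 *	/sys/kernel/config/target/vhost/naa.6001405123456789/
 *	/sys/kernel/config/target/vhost/naa.6001405123456789/tpgt_1/
 *
 * invokes tcm_vhost_make_tport() and tcm_vhost_make_tpg() respectively; the
 * "naa.", "fc." or "iqn." prefix of the WWN directory selects the emulated
 * protocol and "tpgt_<n>" supplies the tag. The mount point and WWNs shown
 * are examples only.
 */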
2086 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2088 struct tcm_vhost_tport *tport = container_of(wwn,
2089 struct tcm_vhost_tport, tport_wwn);
2091 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2092 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2099 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2102 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2103 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2104 utsname()->machine);
2107 TF_WWN_ATTR_RO(tcm_vhost, version);
2109 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2110 &tcm_vhost_wwn_version.attr,
2114 static struct target_core_fabric_ops tcm_vhost_ops = {
2115 .get_fabric_name = tcm_vhost_get_fabric_name,
2116 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
2117 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
2118 .tpg_get_tag = tcm_vhost_get_tag,
2119 .tpg_get_default_depth = tcm_vhost_get_default_depth,
2120 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
2121 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
2122 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
2123 .tpg_check_demo_mode = tcm_vhost_check_true,
2124 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
2125 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2126 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2127 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
2128 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
2129 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
2130 .release_cmd = tcm_vhost_release_cmd,
2131 .check_stop_free = vhost_scsi_check_stop_free,
2132 .shutdown_session = tcm_vhost_shutdown_session,
2133 .close_session = tcm_vhost_close_session,
2134 .sess_get_index = tcm_vhost_sess_get_index,
2135 .sess_get_initiator_sid = NULL,
2136 .write_pending = tcm_vhost_write_pending,
2137 .write_pending_status = tcm_vhost_write_pending_status,
2138 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
2139 .get_task_tag = tcm_vhost_get_task_tag,
2140 .get_cmd_state = tcm_vhost_get_cmd_state,
2141 .queue_data_in = tcm_vhost_queue_data_in,
2142 .queue_status = tcm_vhost_queue_status,
2143 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
2144 .aborted_task = tcm_vhost_aborted_task,
2146 * Set up callbacks for generic logic in target_core_fabric_configfs.c
2148 .fabric_make_wwn = tcm_vhost_make_tport,
2149 .fabric_drop_wwn = tcm_vhost_drop_tport,
2150 .fabric_make_tpg = tcm_vhost_make_tpg,
2151 .fabric_drop_tpg = tcm_vhost_drop_tpg,
2152 .fabric_post_link = tcm_vhost_port_link,
2153 .fabric_pre_unlink = tcm_vhost_port_unlink,
2154 .fabric_make_np = NULL,
2155 .fabric_drop_np = NULL,
2156 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
2157 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
2160 static int tcm_vhost_register_configfs(void)
2162 struct target_fabric_configfs *fabric;
2165 pr_debug("TCM_VHOST fabric module %s on %s/%s"
2166 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2167 utsname()->machine);
2169 * Register the top level struct config_item_type with TCM core
2171 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2172 if (IS_ERR(fabric)) {
2173 pr_err("target_fabric_configfs_init() failed\n");
2174 return PTR_ERR(fabric);
2177 * Setup fabric->tf_ops from our local tcm_vhost_ops
2179 fabric->tf_ops = tcm_vhost_ops;
2181 * Setup default attribute lists for various fabric->tf_cit_tmpl
2183 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2184 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2185 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2186 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2187 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2188 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2189 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2190 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2191 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2193 * Register the fabric for use within TCM
2195 ret = target_fabric_configfs_register(fabric);
2197 pr_err("target_fabric_configfs_register() failed"
2198 " for TCM_VHOST\n");
2202 * Setup our local pointer to *fabric
2204 tcm_vhost_fabric_configfs = fabric;
2205 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2209 static void tcm_vhost_deregister_configfs(void)
2211 if (!tcm_vhost_fabric_configfs)
2214 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2215 tcm_vhost_fabric_configfs = NULL;
2216 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2219 static int __init tcm_vhost_init(void)
2223 * Use our own dedicated workqueue for submitting I/O into
2224 * target core to avoid contention within system_wq.
2226 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2227 if (!tcm_vhost_workqueue)
2230 ret = vhost_scsi_register();
2232 goto out_destroy_workqueue;
2234 ret = tcm_vhost_register_configfs();
2236 goto out_vhost_scsi_deregister;
2240 out_vhost_scsi_deregister:
2241 vhost_scsi_deregister();
2242 out_destroy_workqueue:
2243 destroy_workqueue(tcm_vhost_workqueue);
2248 static void tcm_vhost_exit(void)
2250 tcm_vhost_deregister_configfs();
2251 vhost_scsi_deregister();
2252 destroy_workqueue(tcm_vhost_workqueue);
2255 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2256 MODULE_ALIAS("tcm_vhost");
2257 MODULE_LICENSE("GPL");
2258 module_init(tcm_vhost_init);
2259 module_exit(tcm_vhost_exit);