target: remove the get_fabric_proto_ident method
firefly-linux-kernel-4.4.55.git: drivers/vhost/scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <target/target_core_base.h>
42 #include <target/target_core_fabric.h>
43 #include <target/target_core_fabric_configfs.h>
44 #include <target/target_core_configfs.h>
45 #include <target/configfs_macros.h>
46 #include <linux/vhost.h>
47 #include <linux/virtio_scsi.h>
48 #include <linux/llist.h>
49 #include <linux/bitmap.h>
50 #include <linux/percpu_ida.h>
51
52 #include "vhost.h"
53
54 #define VHOST_SCSI_VERSION  "v0.1"
55 #define VHOST_SCSI_NAMELEN 256
56 #define VHOST_SCSI_MAX_CDB_SIZE 32
57 #define VHOST_SCSI_DEFAULT_TAGS 256
58 #define VHOST_SCSI_PREALLOC_SGLS 2048
59 #define VHOST_SCSI_PREALLOC_UPAGES 2048
60 #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
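/*
 * Sizing note (illustrative): vhost_scsi_map_to_sgl() maps at most one page
 * per scatterlist entry, so with 4 KiB pages the 2048 preallocated data SGLs
 * cap a single command's data payload at 2048 * 4 KiB = 8 MiB.
 */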
61
62 struct vhost_scsi_inflight {
63         /* Wait for the flush operation to finish */
64         struct completion comp;
65         /* Refcount for the inflight reqs */
66         struct kref kref;
67 };
68
69 struct vhost_scsi_cmd {
70         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
71         int tvc_vq_desc;
72         /* virtio-scsi initiator task attribute */
73         int tvc_task_attr;
74         /* virtio-scsi response incoming iovecs */
75         int tvc_in_iovs;
76         /* virtio-scsi initiator data direction */
77         enum dma_data_direction tvc_data_direction;
78         /* Expected data transfer length from virtio-scsi header */
79         u32 tvc_exp_data_len;
80         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
81         u64 tvc_tag;
82         /* The number of scatterlists associated with this cmd */
83         u32 tvc_sgl_count;
84         u32 tvc_prot_sgl_count;
85         /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
86         u32 tvc_lun;
87         /* Pointer to the SGL formatted memory from virtio-scsi */
88         struct scatterlist *tvc_sgl;
89         struct scatterlist *tvc_prot_sgl;
90         struct page **tvc_upages;
91         /* Pointer to response header iovec */
92         struct iovec *tvc_resp_iov;
93         /* Pointer to vhost_scsi for our device */
94         struct vhost_scsi *tvc_vhost;
95         /* Pointer to vhost_virtqueue for the cmd */
96         struct vhost_virtqueue *tvc_vq;
97         /* Pointer to vhost nexus memory */
98         struct vhost_scsi_nexus *tvc_nexus;
99         /* The TCM I/O descriptor that is accessed via container_of() */
100         struct se_cmd tvc_se_cmd;
101         /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
102         struct work_struct work;
103         /* Copy of the incoming SCSI command descriptor block (CDB) */
104         unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
105         /* Sense buffer that will be mapped into outgoing status */
106         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
107         /* Completed commands list, serviced from vhost worker thread */
108         struct llist_node tvc_completion_list;
109         /* Used to track inflight cmd */
110         struct vhost_scsi_inflight *inflight;
111 };
112
113 struct vhost_scsi_nexus {
114         /* Pointer to TCM session for I_T Nexus */
115         struct se_session *tvn_se_sess;
116 };
117
118 struct vhost_scsi_tpg {
119         /* Vhost port target portal group tag for TCM */
120         u16 tport_tpgt;
121         /* Used to track the number of TPG Port/LUN links with regard to explicit I_T Nexus shutdown */
122         int tv_tpg_port_count;
123         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
124         int tv_tpg_vhost_count;
125         /* Used for enabling T10-PI with legacy devices */
126         int tv_fabric_prot_type;
127         /* list for vhost_scsi_list */
128         struct list_head tv_tpg_list;
129         /* Used to protect access for tpg_nexus */
130         struct mutex tv_tpg_mutex;
131         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
132         struct vhost_scsi_nexus *tpg_nexus;
133         /* Pointer back to vhost_scsi_tport */
134         struct vhost_scsi_tport *tport;
135         /* Returned by vhost_scsi_make_tpg() */
136         struct se_portal_group se_tpg;
137         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
138         struct vhost_scsi *vhost_scsi;
139 };
140
141 struct vhost_scsi_tport {
142         /* SCSI protocol the tport is providing */
143         u8 tport_proto_id;
144         /* Binary World Wide unique Port Name for Vhost Target port */
145         u64 tport_wwpn;
146         /* ASCII formatted WWPN for Vhost Target port */
147         char tport_name[VHOST_SCSI_NAMELEN];
148         /* Returned by vhost_scsi_make_tport() */
149         struct se_wwn tport_wwn;
150 };
151
152 struct vhost_scsi_evt {
153         /* event to be sent to guest */
154         struct virtio_scsi_event event;
155         /* event list, serviced from vhost worker thread */
156         struct llist_node list;
157 };
158
159 enum {
160         VHOST_SCSI_VQ_CTL = 0,
161         VHOST_SCSI_VQ_EVT = 1,
162         VHOST_SCSI_VQ_IO = 2,
163 };
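/*
 * Virtqueue layout: index 0 is the control queue, index 1 the event queue,
 * and indices 2..VHOST_SCSI_MAX_VQ-1 are the request (I/O) queues.
 */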
164
165 /* Note: VIRTIO_F_VERSION_1 implies ANY_LAYOUT, so both feature bits are set below. */
166 enum {
167         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
168                                                (1ULL << VIRTIO_SCSI_F_T10_PI) |
169                                                (1ULL << VIRTIO_F_ANY_LAYOUT) |
170                                                (1ULL << VIRTIO_F_VERSION_1)
171 };
172
173 #define VHOST_SCSI_MAX_TARGET   256
174 #define VHOST_SCSI_MAX_VQ       128
175 #define VHOST_SCSI_MAX_EVENT    128
176
177 struct vhost_scsi_virtqueue {
178         struct vhost_virtqueue vq;
179         /*
180          * Reference counting for inflight reqs, used for the flush operation.
181          * At any time one counter tracks newly submitted commands, while the
182          * flush waits for the other one to reach 0.
183          */
184         struct vhost_scsi_inflight inflights[2];
185         /*
186          * Indicate current inflight in use, protected by vq->mutex.
187          * Writers must also take dev mutex and flush under it.
188          */
189         int inflight_idx;
190 };
191
192 struct vhost_scsi {
193         /* Protected by vhost_scsi->dev.mutex */
194         struct vhost_scsi_tpg **vs_tpg;
195         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
196
197         struct vhost_dev dev;
198         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
199
200         struct vhost_work vs_completion_work; /* cmd completion work item */
201         struct llist_head vs_completion_list; /* cmd completion queue */
202
203         struct vhost_work vs_event_work; /* evt injection work item */
204         struct llist_head vs_event_list; /* evt injection queue */
205
206         bool vs_events_missed; /* any missed events, protected by vq->mutex */
207         int vs_events_nr; /* num of pending events, protected by vq->mutex */
208 };
209
210 static struct target_core_fabric_ops vhost_scsi_ops;
211 static struct workqueue_struct *vhost_scsi_workqueue;
212
213 /* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
214 static DEFINE_MUTEX(vhost_scsi_mutex);
215 static LIST_HEAD(vhost_scsi_list);
216
217 static int iov_num_pages(void __user *iov_base, size_t iov_len)
218 {
219         return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
220                ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
221 }
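/*
 * Worked example (assuming 4 KiB pages): iov_base = 0x1ffc, iov_len = 8
 * spans two pages, since PAGE_ALIGN(0x1ffc + 8) = 0x3000 and
 * 0x1ffc & PAGE_MASK = 0x1000, giving (0x3000 - 0x1000) >> 12 = 2.
 */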
222
223 static void vhost_scsi_done_inflight(struct kref *kref)
224 {
225         struct vhost_scsi_inflight *inflight;
226
227         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
228         complete(&inflight->comp);
229 }
230
231 static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
232                                     struct vhost_scsi_inflight *old_inflight[])
233 {
234         struct vhost_scsi_inflight *new_inflight;
235         struct vhost_virtqueue *vq;
236         int idx, i;
237
238         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
239                 vq = &vs->vqs[i].vq;
240
241                 mutex_lock(&vq->mutex);
242
243                 /* store the old inflight */
244                 idx = vs->vqs[i].inflight_idx;
245                 if (old_inflight)
246                         old_inflight[i] = &vs->vqs[i].inflights[idx];
247
248                 /* set up the new inflight */
249                 vs->vqs[i].inflight_idx = idx ^ 1;
250                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
251                 kref_init(&new_inflight->kref);
252                 init_completion(&new_inflight->comp);
253
254                 mutex_unlock(&vq->mutex);
255         }
256 }
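/*
 * Illustrative timeline for the two inflight counters: before a flush,
 * inflights[0] is active and every submitted cmd takes a kref on it.
 * vhost_scsi_init_inflight() flips inflight_idx so new cmds reference
 * inflights[1], while vhost_scsi_flush() drops the initial kref on
 * inflights[0] and waits for its completion to fire once the last
 * pre-flush cmd calls vhost_scsi_put_inflight().
 */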
257
258 static struct vhost_scsi_inflight *
259 vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
260 {
261         struct vhost_scsi_inflight *inflight;
262         struct vhost_scsi_virtqueue *svq;
263
264         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
265         inflight = &svq->inflights[svq->inflight_idx];
266         kref_get(&inflight->kref);
267
268         return inflight;
269 }
270
271 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
272 {
273         kref_put(&inflight->kref, vhost_scsi_done_inflight);
274 }
275
276 static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
277 {
278         return 1;
279 }
280
281 static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
282 {
283         return 0;
284 }
285
286 static char *vhost_scsi_get_fabric_name(void)
287 {
288         return "vhost";
289 }
290
291 static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
292 {
293         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
294                                 struct vhost_scsi_tpg, se_tpg);
295         struct vhost_scsi_tport *tport = tpg->tport;
296
297         return &tport->tport_name[0];
298 }
299
300 static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
301 {
302         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
303                                 struct vhost_scsi_tpg, se_tpg);
304         return tpg->tport_tpgt;
305 }
306
307 static u32
308 vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
309                               struct se_node_acl *se_nacl,
310                               struct t10_pr_registration *pr_reg,
311                               int *format_code,
312                               unsigned char *buf)
313 {
314         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
315                                 struct vhost_scsi_tpg, se_tpg);
316         struct vhost_scsi_tport *tport = tpg->tport;
317
318         switch (tport->tport_proto_id) {
319         case SCSI_PROTOCOL_SAS:
320                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
321                                         format_code, buf);
322         case SCSI_PROTOCOL_FCP:
323                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
324                                         format_code, buf);
325         case SCSI_PROTOCOL_ISCSI:
326                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
327                                         format_code, buf);
328         default:
329                 pr_err("Unknown tport_proto_id: 0x%02x, using"
330                         " SAS emulation\n", tport->tport_proto_id);
331                 break;
332         }
333
334         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
335                         format_code, buf);
336 }
337
338 static u32
339 vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
340                                   struct se_node_acl *se_nacl,
341                                   struct t10_pr_registration *pr_reg,
342                                   int *format_code)
343 {
344         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
345                                 struct vhost_scsi_tpg, se_tpg);
346         struct vhost_scsi_tport *tport = tpg->tport;
347
348         switch (tport->tport_proto_id) {
349         case SCSI_PROTOCOL_SAS:
350                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
351                                         format_code);
352         case SCSI_PROTOCOL_FCP:
353                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
354                                         format_code);
355         case SCSI_PROTOCOL_ISCSI:
356                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
357                                         format_code);
358         default:
359                 pr_err("Unknown tport_proto_id: 0x%02x, using"
360                         " SAS emulation\n", tport->tport_proto_id);
361                 break;
362         }
363
364         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
365                         format_code);
366 }
367
368 static char *
369 vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
370                                     const char *buf,
371                                     u32 *out_tid_len,
372                                     char **port_nexus_ptr)
373 {
374         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
375                                 struct vhost_scsi_tpg, se_tpg);
376         struct vhost_scsi_tport *tport = tpg->tport;
377
378         switch (tport->tport_proto_id) {
379         case SCSI_PROTOCOL_SAS:
380                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
381                                         port_nexus_ptr);
382         case SCSI_PROTOCOL_FCP:
383                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
384                                         port_nexus_ptr);
385         case SCSI_PROTOCOL_ISCSI:
386                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
387                                         port_nexus_ptr);
388         default:
389                 pr_err("Unknown tport_proto_id: 0x%02x, using"
390                         " SAS emulation\n", tport->tport_proto_id);
391                 break;
392         }
393
394         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
395                         port_nexus_ptr);
396 }
397
398 static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
399 {
400         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
401                                 struct vhost_scsi_tpg, se_tpg);
402
403         return tpg->tv_fabric_prot_type;
404 }
405
406 static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
407 {
408         return 1;
409 }
410
411 static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
412 {
413         struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
414                                 struct vhost_scsi_cmd, tvc_se_cmd);
415         struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
416         int i;
417
418         if (tv_cmd->tvc_sgl_count) {
419                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
420                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
421         }
422         if (tv_cmd->tvc_prot_sgl_count) {
423                 for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
424                         put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
425         }
426
427         vhost_scsi_put_inflight(tv_cmd->inflight);
428         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
429 }
430
431 static int vhost_scsi_shutdown_session(struct se_session *se_sess)
432 {
433         return 0;
434 }
435
436 static void vhost_scsi_close_session(struct se_session *se_sess)
437 {
438         return;
439 }
440
441 static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
442 {
443         return 0;
444 }
445
446 static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
447 {
448         /* Go ahead and process the write immediately */
449         target_execute_cmd(se_cmd);
450         return 0;
451 }
452
453 static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
454 {
455         return 0;
456 }
457
458 static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
459 {
460         return;
461 }
462
463 static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
464 {
465         return 0;
466 }
467
468 static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
469 {
470         return 0;
471 }
472
473 static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
474 {
475         struct vhost_scsi *vs = cmd->tvc_vhost;
476
477         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
478
479         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
480 }
481
482 static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
483 {
484         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
485                                 struct vhost_scsi_cmd, tvc_se_cmd);
486         vhost_scsi_complete_cmd(cmd);
487         return 0;
488 }
489
490 static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
491 {
492         struct vhost_scsi_cmd *cmd = container_of(se_cmd,
493                                 struct vhost_scsi_cmd, tvc_se_cmd);
494         vhost_scsi_complete_cmd(cmd);
495         return 0;
496 }
497
498 static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
499 {
500         return;
501 }
502
503 static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
504 {
505         return;
506 }
507
508 static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
509 {
510         vs->vs_events_nr--;
511         kfree(evt);
512 }
513
514 static struct vhost_scsi_evt *
515 vhost_scsi_allocate_evt(struct vhost_scsi *vs,
516                        u32 event, u32 reason)
517 {
518         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
519         struct vhost_scsi_evt *evt;
520
521         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
522                 vs->vs_events_missed = true;
523                 return NULL;
524         }
525
526         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
527         if (!evt) {
528                 vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
529                 vs->vs_events_missed = true;
530                 return NULL;
531         }
532
533         evt->event.event = cpu_to_vhost32(vq, event);
534         evt->event.reason = cpu_to_vhost32(vq, reason);
535         vs->vs_events_nr++;
536
537         return evt;
538 }
539
540 static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
541 {
542         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
543
544         /* TODO locking against target/backend threads? */
545         transport_generic_free_cmd(se_cmd, 0);
546
547 }
548
549 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
550 {
551         return target_put_sess_cmd(se_cmd);
552 }
553
554 static void
555 vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
556 {
557         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
558         struct virtio_scsi_event *event = &evt->event;
559         struct virtio_scsi_event __user *eventp;
560         unsigned out, in;
561         int head, ret;
562
563         if (!vq->private_data) {
564                 vs->vs_events_missed = true;
565                 return;
566         }
567
568 again:
569         vhost_disable_notify(&vs->dev, vq);
570         head = vhost_get_vq_desc(vq, vq->iov,
571                         ARRAY_SIZE(vq->iov), &out, &in,
572                         NULL, NULL);
573         if (head < 0) {
574                 vs->vs_events_missed = true;
575                 return;
576         }
577         if (head == vq->num) {
578                 if (vhost_enable_notify(&vs->dev, vq))
579                         goto again;
580                 vs->vs_events_missed = true;
581                 return;
582         }
583
584         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
585                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
586                                 vq->iov[out].iov_len);
587                 vs->vs_events_missed = true;
588                 return;
589         }
590
591         if (vs->vs_events_missed) {
592                 event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
593                 vs->vs_events_missed = false;
594         }
595
596         eventp = vq->iov[out].iov_base;
597         ret = __copy_to_user(eventp, event, sizeof(*event));
598         if (!ret)
599                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
600         else
601                 vq_err(vq, "Faulted on vhost_scsi_send_event\n");
602 }
603
604 static void vhost_scsi_evt_work(struct vhost_work *work)
605 {
606         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
607                                         vs_event_work);
608         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
609         struct vhost_scsi_evt *evt;
610         struct llist_node *llnode;
611
612         mutex_lock(&vq->mutex);
613         llnode = llist_del_all(&vs->vs_event_list);
614         while (llnode) {
615                 evt = llist_entry(llnode, struct vhost_scsi_evt, list);
616                 llnode = llist_next(llnode);
617                 vhost_scsi_do_evt_work(vs, evt);
618                 vhost_scsi_free_evt(vs, evt);
619         }
620         mutex_unlock(&vq->mutex);
621 }
622
623 /* Fill in status and signal that we are done processing this command
624  *
625  * This is scheduled in the vhost work queue so we are called with the owner
626  * process mm and can access the vring.
627  */
628 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
629 {
630         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
631                                         vs_completion_work);
632         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
633         struct virtio_scsi_cmd_resp v_rsp;
634         struct vhost_scsi_cmd *cmd;
635         struct llist_node *llnode;
636         struct se_cmd *se_cmd;
637         struct iov_iter iov_iter;
638         int ret, vq;
639
640         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
641         llnode = llist_del_all(&vs->vs_completion_list);
642         while (llnode) {
643                 cmd = llist_entry(llnode, struct vhost_scsi_cmd,
644                                      tvc_completion_list);
645                 llnode = llist_next(llnode);
646                 se_cmd = &cmd->tvc_se_cmd;
647
648                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
649                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
650
651                 memset(&v_rsp, 0, sizeof(v_rsp));
652                 v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
653                 /* TODO is status_qualifier field needed? */
654                 v_rsp.status = se_cmd->scsi_status;
655                 v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
656                                                  se_cmd->scsi_sense_length);
657                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
658                        se_cmd->scsi_sense_length);
659
660                 iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
661                               cmd->tvc_in_iovs, sizeof(v_rsp));
662                 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
663                 if (likely(ret == sizeof(v_rsp))) {
664                         struct vhost_scsi_virtqueue *q;
665                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
666                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
667                         vq = q - vs->vqs;
668                         __set_bit(vq, signal);
669                 } else
670                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
671
672                 vhost_scsi_free_cmd(cmd);
673         }
674
675         vq = -1;
676         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
677                 < VHOST_SCSI_MAX_VQ)
678                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
679 }
680
681 static struct vhost_scsi_cmd *
682 vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
683                    unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
684                    u32 exp_data_len, int data_direction)
685 {
686         struct vhost_scsi_cmd *cmd;
687         struct vhost_scsi_nexus *tv_nexus;
688         struct se_session *se_sess;
689         struct scatterlist *sg, *prot_sg;
690         struct page **pages;
691         int tag;
692
693         tv_nexus = tpg->tpg_nexus;
694         if (!tv_nexus) {
695                 pr_err("Unable to locate active struct vhost_scsi_nexus\n");
696                 return ERR_PTR(-EIO);
697         }
698         se_sess = tv_nexus->tvn_se_sess;
699
700         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
701         if (tag < 0) {
702                 pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
703                 return ERR_PTR(-ENOMEM);
704         }
705
706         cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
707         sg = cmd->tvc_sgl;
708         prot_sg = cmd->tvc_prot_sgl;
709         pages = cmd->tvc_upages;
710         memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
711
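                /*
                 * The memset above cleared the whole descriptor, including the
                 * pointers to the preallocated scatterlists and page array
                 * saved just before it; restore them now.
                 */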
712         cmd->tvc_sgl = sg;
713         cmd->tvc_prot_sgl = prot_sg;
714         cmd->tvc_upages = pages;
715         cmd->tvc_se_cmd.map_tag = tag;
716         cmd->tvc_tag = scsi_tag;
717         cmd->tvc_lun = lun;
718         cmd->tvc_task_attr = task_attr;
719         cmd->tvc_exp_data_len = exp_data_len;
720         cmd->tvc_data_direction = data_direction;
721         cmd->tvc_nexus = tv_nexus;
722         cmd->inflight = vhost_scsi_get_inflight(vq);
723
724         memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
725
726         return cmd;
727 }
728
729 /*
730  * Map a user memory range into a scatterlist
731  *
732  * Returns the number of scatterlist entries used or -errno on error.
733  */
734 static int
735 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
736                       void __user *ptr,
737                       size_t len,
738                       struct scatterlist *sgl,
739                       bool write)
740 {
741         unsigned int npages = 0, offset, nbytes;
742         unsigned int pages_nr = iov_num_pages(ptr, len);
743         struct scatterlist *sg = sgl;
744         struct page **pages = cmd->tvc_upages;
745         int ret, i;
746
747         if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
748                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
749                        " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
750                         pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
751                 return -ENOBUFS;
752         }
753
754         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
755         /* No pages were pinned */
756         if (ret < 0)
757                 goto out;
758         /* Fewer pages were pinned than requested */
759         if (ret != pages_nr) {
760                 for (i = 0; i < ret; i++)
761                         put_page(pages[i]);
762                 ret = -EFAULT;
763                 goto out;
764         }
765
766         while (len > 0) {
767                 offset = (uintptr_t)ptr & ~PAGE_MASK;
768                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
769                 sg_set_page(sg, pages[npages], nbytes, offset);
770                 ptr += nbytes;
771                 len -= nbytes;
772                 sg++;
773                 npages++;
774         }
775
776 out:
777         return ret;
778 }
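/*
 * Worked example (assuming 4 KiB pages): a 10 KiB buffer starting at page
 * offset 0x800 pins 3 pages and produces 3 scatterlist entries of
 * 2 KiB, 4 KiB, and 4 KiB.
 */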
779
780 static int
781 vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
782 {
783         int sgl_count = 0;
784
785         if (!iter || !iter->iov) {
786                 pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
787                        " present\n", __func__, bytes);
788                 return -EINVAL;
789         }
790
791         sgl_count = iov_iter_npages(iter, 0xffff);
792         if (sgl_count > max_sgls) {
793                 pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
794                        " max_sgls: %d\n", __func__, sgl_count, max_sgls);
795                 return -EINVAL;
796         }
797         return sgl_count;
798 }
799
800 static int
801 vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
802                       struct iov_iter *iter,
803                       struct scatterlist *sg, int sg_count)
804 {
805         size_t off = iter->iov_offset;
806         int i, ret;
807
808         for (i = 0; i < iter->nr_segs; i++) {
809                 void __user *base = iter->iov[i].iov_base + off;
810                 size_t len = iter->iov[i].iov_len - off;
811
812                 ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
813                 if (ret < 0) {
814                         for (i = 0; i < sg_count; i++) {
815                                 struct page *page = sg_page(&sg[i]);
816                                 if (page)
817                                         put_page(page);
818                         }
819                         return ret;
820                 }
821                 sg += ret;
822                 off = 0;
823         }
824         return 0;
825 }
826
827 static int
828 vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
829                  size_t prot_bytes, struct iov_iter *prot_iter,
830                  size_t data_bytes, struct iov_iter *data_iter)
831 {
832         int sgl_count, ret;
833         bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
834
835         if (prot_bytes) {
836                 sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
837                                                  VHOST_SCSI_PREALLOC_PROT_SGLS);
838                 if (sgl_count < 0)
839                         return sgl_count;
840
841                 sg_init_table(cmd->tvc_prot_sgl, sgl_count);
842                 cmd->tvc_prot_sgl_count = sgl_count;
843                 pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
844                          cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
845
846                 ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
847                                             cmd->tvc_prot_sgl,
848                                             cmd->tvc_prot_sgl_count);
849                 if (ret < 0) {
850                         cmd->tvc_prot_sgl_count = 0;
851                         return ret;
852                 }
853         }
854         sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
855                                          VHOST_SCSI_PREALLOC_SGLS);
856         if (sgl_count < 0)
857                 return sgl_count;
858
859         sg_init_table(cmd->tvc_sgl, sgl_count);
860         cmd->tvc_sgl_count = sgl_count;
861         pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
862                   cmd->tvc_sgl, cmd->tvc_sgl_count);
863
864         ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
865                                     cmd->tvc_sgl, cmd->tvc_sgl_count);
866         if (ret < 0) {
867                 cmd->tvc_sgl_count = 0;
868                 return ret;
869         }
870         return 0;
871 }
872
873 static int vhost_scsi_to_tcm_attr(int attr)
874 {
875         switch (attr) {
876         case VIRTIO_SCSI_S_SIMPLE:
877                 return TCM_SIMPLE_TAG;
878         case VIRTIO_SCSI_S_ORDERED:
879                 return TCM_ORDERED_TAG;
880         case VIRTIO_SCSI_S_HEAD:
881                 return TCM_HEAD_TAG;
882         case VIRTIO_SCSI_S_ACA:
883                 return TCM_ACA_TAG;
884         default:
885                 break;
886         }
887         return TCM_SIMPLE_TAG;
888 }
889
890 static void vhost_scsi_submission_work(struct work_struct *work)
891 {
892         struct vhost_scsi_cmd *cmd =
893                 container_of(work, struct vhost_scsi_cmd, work);
894         struct vhost_scsi_nexus *tv_nexus;
895         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
896         struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
897         int rc;
898
899         /* FIXME: BIDI operation */
900         if (cmd->tvc_sgl_count) {
901                 sg_ptr = cmd->tvc_sgl;
902
903                 if (cmd->tvc_prot_sgl_count)
904                         sg_prot_ptr = cmd->tvc_prot_sgl;
905                 else
906                         se_cmd->prot_pto = true;
907         } else {
908                 sg_ptr = NULL;
909         }
910         tv_nexus = cmd->tvc_nexus;
911
912         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
913                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
914                         cmd->tvc_lun, cmd->tvc_exp_data_len,
915                         vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
916                         cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
917                         sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
918                         cmd->tvc_prot_sgl_count);
919         if (rc < 0) {
920                 transport_send_check_condition_and_sense(se_cmd,
921                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
922                 transport_generic_free_cmd(se_cmd, 0);
923         }
924 }
925
926 static void
927 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
928                            struct vhost_virtqueue *vq,
929                            int head, unsigned out)
930 {
931         struct virtio_scsi_cmd_resp __user *resp;
932         struct virtio_scsi_cmd_resp rsp;
933         int ret;
934
935         memset(&rsp, 0, sizeof(rsp));
936         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
937         resp = vq->iov[out].iov_base;
938         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
939         if (!ret)
940                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
941         else
942                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
943 }
944
945 static void
946 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
947 {
948         struct vhost_scsi_tpg **vs_tpg, *tpg;
949         struct virtio_scsi_cmd_req v_req;
950         struct virtio_scsi_cmd_req_pi v_req_pi;
951         struct vhost_scsi_cmd *cmd;
952         struct iov_iter out_iter, in_iter, prot_iter, data_iter;
953         u64 tag;
954         u32 exp_data_len, data_direction;
955         unsigned out, in;
956         int head, ret, prot_bytes;
957         size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
958         size_t out_size, in_size;
959         u16 lun;
960         u8 *target, *lunp, task_attr;
961         bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
962         void *req, *cdb;
963
964         mutex_lock(&vq->mutex);
965         /*
966          * We can handle the vq only after the endpoint is set up by calling the
967          * VHOST_SCSI_SET_ENDPOINT ioctl.
968          */
969         vs_tpg = vq->private_data;
970         if (!vs_tpg)
971                 goto out;
972
973         vhost_disable_notify(&vs->dev, vq);
974
975         for (;;) {
976                 head = vhost_get_vq_desc(vq, vq->iov,
977                                          ARRAY_SIZE(vq->iov), &out, &in,
978                                          NULL, NULL);
979                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
980                          head, out, in);
981                 /* On error, stop handling until the next kick. */
982                 if (unlikely(head < 0))
983                         break;
984                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
985                 if (head == vq->num) {
986                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
987                                 vhost_disable_notify(&vs->dev, vq);
988                                 continue;
989                         }
990                         break;
991                 }
992                 /*
993                  * Check for a sane response buffer so we can report early
994                  * errors back to the guest.
995                  */
996                 if (unlikely(vq->iov[out].iov_len < rsp_size)) {
997                         vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
998                                 " size, got %zu bytes\n", vq->iov[out].iov_len);
999                         break;
1000                 }
1001                 /*
1002                  * Set up pointers and values based upon the different virtio-scsi
1003                  * request header layout used when T10_PI is enabled in the KVM guest.
1004                  */
1005                 if (t10_pi) {
1006                         req = &v_req_pi;
1007                         req_size = sizeof(v_req_pi);
1008                         lunp = &v_req_pi.lun[0];
1009                         target = &v_req_pi.lun[1];
1010                 } else {
1011                         req = &v_req;
1012                         req_size = sizeof(v_req);
1013                         lunp = &v_req.lun[0];
1014                         target = &v_req.lun[1];
1015                 }
1016                 /*
1017                  * FIXME: Not correct for BIDI operation
1018                  */
1019                 out_size = iov_length(vq->iov, out);
1020                 in_size = iov_length(&vq->iov[out], in);
1021
1022                 /*
1023                  * Copy over the virtio-scsi request header, which for an
1024                  * ANY_LAYOUT-enabled guest may span multiple iovecs, or a
1025                  * single iovec may contain both the header + outgoing
1026                  * WRITE payloads.
1027                  *
1028                  * copy_from_iter() will advance out_iter, so that it will
1029                  * point at the start of the outgoing WRITE payload, if
1030                  * DMA_TO_DEVICE is set.
1031                  */
1032                 iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
1033
1034                 ret = copy_from_iter(req, req_size, &out_iter);
1035                 if (unlikely(ret != req_size)) {
1036                         vq_err(vq, "Faulted on copy_from_iter\n");
1037                         vhost_scsi_send_bad_target(vs, vq, head, out);
1038                         continue;
1039                 }
1040                 /* virtio-scsi spec requires byte 0 of the lun to be 1 */
1041                 if (unlikely(*lunp != 1)) {
1042                         vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
1043                         vhost_scsi_send_bad_target(vs, vq, head, out);
1044                         continue;
1045                 }
1046
1047                 tpg = ACCESS_ONCE(vs_tpg[*target]);
1048                 if (unlikely(!tpg)) {
1049                         /* Target does not exist, fail the request */
1050                         vhost_scsi_send_bad_target(vs, vq, head, out);
1051                         continue;
1052                 }
1053                 /*
1054                  * Determine data_direction by comparing the total outgoing
1055                  * iovec size against the virtio-scsi request header size, and
1056                  * the total incoming iovec size against the response header size.
1057                  *
1058                  * For DMA_TO_DEVICE this is out_iter, which is already pointing
1059                  * to the right place.
1060                  *
1061                  * For DMA_FROM_DEVICE, the iovec will be just past the end
1062                  * of the virtio-scsi response header in either the same
1063                  * or immediately following iovec.
1064                  *
1065                  * Any associated T10_PI bytes for the outgoing / incoming
1066                  * payloads are included in calculation of exp_data_len here.
1067                  */
1068                 prot_bytes = 0;
1069
1070                 if (out_size > req_size) {
1071                         data_direction = DMA_TO_DEVICE;
1072                         exp_data_len = out_size - req_size;
1073                         data_iter = out_iter;
1074                 } else if (in_size > rsp_size) {
1075                         data_direction = DMA_FROM_DEVICE;
1076                         exp_data_len = in_size - rsp_size;
1077
1078                         iov_iter_init(&in_iter, READ, &vq->iov[out], in,
1079                                       rsp_size + exp_data_len);
1080                         iov_iter_advance(&in_iter, rsp_size);
1081                         data_iter = in_iter;
1082                 } else {
1083                         data_direction = DMA_NONE;
1084                         exp_data_len = 0;
1085                 }
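                /*
                 * Example: a 16 KiB guest WRITE arrives with out_size =
                 * req_size + 16384 and in_size = rsp_size, so the first
                 * branch selects DMA_TO_DEVICE with exp_data_len = 16384.
                 */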
1086                 /*
1087                  * If T10_PI header + payload is present, setup prot_iter values
1088                  * and recalculate data_iter for vhost_scsi_mapal() mapping to
1089                  * host scatterlists via get_user_pages_fast().
1090                  */
1091                 if (t10_pi) {
1092                         if (v_req_pi.pi_bytesout) {
1093                                 if (data_direction != DMA_TO_DEVICE) {
1094                                         vq_err(vq, "Received non zero pi_bytesout,"
1095                                                 " but wrong data_direction\n");
1096                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1097                                         continue;
1098                                 }
1099                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
1100                         } else if (v_req_pi.pi_bytesin) {
1101                                 if (data_direction != DMA_FROM_DEVICE) {
1102                                         vq_err(vq, "Received non zero pi_bytesin,"
1103                                                 " but wrong data_direction\n");
1104                                         vhost_scsi_send_bad_target(vs, vq, head, out);
1105                                         continue;
1106                                 }
1107                                 prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
1108                         }
1109                         /*
1110                          * Set prot_iter to data_iter, and advance past any
1111                          * preceding prot_bytes that may be present.
1112                          *
1113                          * Also fix up the exp_data_len to reflect only the
1114                          * actual data payload length.
1115                          */
1116                         if (prot_bytes) {
1117                                 exp_data_len -= prot_bytes;
1118                                 prot_iter = data_iter;
1119                                 iov_iter_advance(&data_iter, prot_bytes);
1120                         }
1121                         tag = vhost64_to_cpu(vq, v_req_pi.tag);
1122                         task_attr = v_req_pi.task_attr;
1123                         cdb = &v_req_pi.cdb[0];
1124                         lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
1125                 } else {
1126                         tag = vhost64_to_cpu(vq, v_req.tag);
1127                         task_attr = v_req.task_attr;
1128                         cdb = &v_req.cdb[0];
1129                         lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1130                 }
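                /*
                 * Worked example: for v_req.lun = { 1, 3, 0x40, 0x2a, ... }
                 * the selected target is vs_tpg[3] and the unpacked LUN is
                 * ((0x40 << 8) | 0x2a) & 0x3FFF = 0x2a, i.e. LUN 42.
                 */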
1131                 /*
1132                  * Check that the received CDB size does not exceed our
1133                  * hardcoded max for vhost-scsi, then get a pre-allocated
1134                  * cmd descriptor for the new virtio-scsi tag.
1135                  *
1136                  * TODO what if cdb was too small for varlen cdb header?
1137                  */
1138                 if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
1139                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1140                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1141                                 scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
1142                         vhost_scsi_send_bad_target(vs, vq, head, out);
1143                         continue;
1144                 }
1145                 cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
1146                                          exp_data_len + prot_bytes,
1147                                          data_direction);
1148                 if (IS_ERR(cmd)) {
1149                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1150                                PTR_ERR(cmd));
1151                         vhost_scsi_send_bad_target(vs, vq, head, out);
1152                         continue;
1153                 }
1154                 cmd->tvc_vhost = vs;
1155                 cmd->tvc_vq = vq;
1156                 cmd->tvc_resp_iov = &vq->iov[out];
1157                 cmd->tvc_in_iovs = in;
1158
1159                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1160                          cmd->tvc_cdb[0], cmd->tvc_lun);
1161                 pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
1162                          " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
1163
1164                 if (data_direction != DMA_NONE) {
1165                         ret = vhost_scsi_mapal(cmd,
1166                                                prot_bytes, &prot_iter,
1167                                                exp_data_len, &data_iter);
1168                         if (unlikely(ret)) {
1169                                 vq_err(vq, "Failed to map iov to sgl\n");
1170                                 vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
1171                                 vhost_scsi_send_bad_target(vs, vq, head, out);
1172                                 continue;
1173                         }
1174                 }
1175                 /*
1176                  * Save the descriptor from vhost_get_vq_desc() to be used to
1177                  * complete the virtio-scsi request in TCM callback context via
1178                  * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
1179                  */
1180                 cmd->tvc_vq_desc = head;
1181                 /*
1182                  * Dispatch cmd descriptor for cmwq execution in process
1183                  * context provided by vhost_scsi_workqueue.  This also ensures
1184                  * cmd is executed on the same kworker CPU as this vhost
1185                  * thread to gain positive L2 cache locality effects.
1186                  */
1187                 INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1188                 queue_work(vhost_scsi_workqueue, &cmd->work);
1189         }
1190 out:
1191         mutex_unlock(&vq->mutex);
1192 }
1193
1194 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1195 {
1196         pr_debug("%s: The handling func for control queue.\n", __func__);
1197 }
1198
1199 static void
1200 vhost_scsi_send_evt(struct vhost_scsi *vs,
1201                    struct vhost_scsi_tpg *tpg,
1202                    struct se_lun *lun,
1203                    u32 event,
1204                    u32 reason)
1205 {
1206         struct vhost_scsi_evt *evt;
1207
1208         evt = vhost_scsi_allocate_evt(vs, event, reason);
1209         if (!evt)
1210                 return;
1211
1212         if (tpg && lun) {
1213                 /* TODO: share lun setup code with virtio-scsi.ko */
1214                 /*
1215                  * Note: evt->event is zeroed when we allocate it and
1216                  * lun[4-7] need to be zero according to virtio-scsi spec.
1217                  */
1218                 evt->event.lun[0] = 0x01;
1219                 evt->event.lun[1] = tpg->tport_tpgt;
1220                 if (lun->unpacked_lun >= 256)
1221                         evt->event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
1222                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1223         }
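                /*
                 * Example: unpacked_lun = 300 encodes as lun[2] = 0x41 and
                 * lun[3] = 0x2C, since 300 = 0x12C >= 256 sets the 0x40 flag
                 * alongside the high byte of the 14-bit LUN value.
                 */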
1224
1225         llist_add(&evt->list, &vs->vs_event_list);
1226         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1227 }
1228
1229 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1230 {
1231         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1232                                                 poll.work);
1233         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1234
1235         mutex_lock(&vq->mutex);
1236         if (!vq->private_data)
1237                 goto out;
1238
1239         if (vs->vs_events_missed)
1240                 vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1241 out:
1242         mutex_unlock(&vq->mutex);
1243 }
1244
1245 static void vhost_scsi_handle_kick(struct vhost_work *work)
1246 {
1247         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1248                                                 poll.work);
1249         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1250
1251         vhost_scsi_handle_vq(vs, vq);
1252 }
1253
1254 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1255 {
1256         vhost_poll_flush(&vs->vqs[index].vq.poll);
1257 }
1258
1259 /* Callers must hold dev mutex */
1260 static void vhost_scsi_flush(struct vhost_scsi *vs)
1261 {
1262         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1263         int i;
1264
1265         /* Init new inflight and remember the old inflight */
1266         vhost_scsi_init_inflight(vs, old_inflight);
1267
1268         /*
1269          * The inflight->kref was initialized to 1. We decrement it here to
1270          * indicate the start of the flush operation so that it will reach 0
1271          * when all the reqs are finished.
1272          */
1273         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1274                 kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
1275
1276         /* Flush both the vhost poll and vhost work */
1277         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1278                 vhost_scsi_flush_vq(vs, i);
1279         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1280         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1281
1282         /* Wait for all reqs issued before the flush to be finished */
1283         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1284                 wait_for_completion(&old_inflight[i]->comp);
1285 }
1286
1287 /*
1288  * Called from vhost_scsi_ioctl() context to walk the list of available
1289  * vhost_scsi_tpg with an active struct vhost_scsi_nexus
1290  *
1291  *  The lock nesting rule is:
1292  *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1293  */
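/*
 * Userspace sketch (illustrative only; assumes an already-configured target
 * named naa.6001405e1d0bcf34, error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	struct vhost_scsi_target backend = { .abi_version = VHOST_SCSI_ABI_VERSION };
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	strncpy(backend.vhost_wwpn, "naa.6001405e1d0bcf34",
 *		sizeof(backend.vhost_wwpn) - 1);
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 */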
1294 static int
1295 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1296                         struct vhost_scsi_target *t)
1297 {
1298         struct se_portal_group *se_tpg;
1299         struct vhost_scsi_tport *tv_tport;
1300         struct vhost_scsi_tpg *tpg;
1301         struct vhost_scsi_tpg **vs_tpg;
1302         struct vhost_virtqueue *vq;
1303         int index, ret, i, len;
1304         bool match = false;
1305
1306         mutex_lock(&vhost_scsi_mutex);
1307         mutex_lock(&vs->dev.mutex);
1308
1309         /* Verify that the rings have been set up correctly. */
1310         for (index = 0; index < vs->dev.nvqs; ++index) {
1312                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1313                         ret = -EFAULT;
1314                         goto out;
1315                 }
1316         }
1317
1318         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1319         vs_tpg = kzalloc(len, GFP_KERNEL);
1320         if (!vs_tpg) {
1321                 ret = -ENOMEM;
1322                 goto out;
1323         }
1324         if (vs->vs_tpg)
1325                 memcpy(vs_tpg, vs->vs_tpg, len);
1326
1327         list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1328                 mutex_lock(&tpg->tv_tpg_mutex);
1329                 if (!tpg->tpg_nexus) {
1330                         mutex_unlock(&tpg->tv_tpg_mutex);
1331                         continue;
1332                 }
1333                 if (tpg->tv_tpg_vhost_count != 0) {
1334                         mutex_unlock(&tpg->tv_tpg_mutex);
1335                         continue;
1336                 }
1337                 tv_tport = tpg->tport;
1338
1339                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1340                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1341                                 kfree(vs_tpg);
1342                                 mutex_unlock(&tpg->tv_tpg_mutex);
1343                                 ret = -EEXIST;
1344                                 goto out;
1345                         }
1346                         /*
1347                          * In order to ensure individual vhost-scsi configfs
1348                          * groups cannot be removed while in use by vhost ioctl,
1349                          * go ahead and take an explicit se_tpg->tpg_group.cg_item
1350                          * dependency now.
1351                          */
1352                         se_tpg = &tpg->se_tpg;
1353                         ret = target_depend_item(&se_tpg->tpg_group.cg_item);
1354                         if (ret) {
1355                                 pr_warn("configfs_depend_item() failed: %d\n", ret);
1356                                 kfree(vs_tpg);
1357                                 mutex_unlock(&tpg->tv_tpg_mutex);
1358                                 goto out;
1359                         }
1360                         tpg->tv_tpg_vhost_count++;
1361                         tpg->vhost_scsi = vs;
1362                         vs_tpg[tpg->tport_tpgt] = tpg;
1363                         smp_mb__after_atomic();
1364                         match = true;
1365                 }
1366                 mutex_unlock(&tpg->tv_tpg_mutex);
1367         }
1368
1369         if (match) {
1370                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1371                        sizeof(vs->vs_vhost_wwpn));
1372                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1373                         vq = &vs->vqs[i].vq;
1374                         mutex_lock(&vq->mutex);
1375                         vq->private_data = vs_tpg;
1376                         vhost_init_used(vq);
1377                         mutex_unlock(&vq->mutex);
1378                 }
1379                 ret = 0;
1380         } else {
1381                 ret = -EEXIST; /* no available tpg matched t->vhost_wwpn */
1382         }
1383
1384         /*
1385          * Act as synchronize_rcu to make sure any in-flight accesses
1386          * to the old vs->vs_tpg have finished.
1387          */
1388         vhost_scsi_flush(vs);
1389         kfree(vs->vs_tpg);
1390         vs->vs_tpg = vs_tpg;
1391
1392 out:
1393         mutex_unlock(&vs->dev.mutex);
1394         mutex_unlock(&vhost_scsi_mutex);
1395         return ret;
1396 }
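
/*
 * Illustrative only: a minimal userspace sketch of driving the ioctl
 * above (via vhost_scsi_ioctl() below), assuming the usual userspace
 * headers plus the UAPI definitions from <linux/vhost.h>; the WWPN is
 * hypothetical and must name an already-configured vhost-scsi configfs
 * target:
 *
 *	struct vhost_scsi_target backend = { 0 };
 *	int vhost_fd = open("/dev/vhost-scsi", O_RDWR);
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *	snprintf(backend.vhost_wwpn, sizeof(backend.vhost_wwpn),
 *		 "naa.600140554cf3a18e");
 *	if (ioctl(vhost_fd, VHOST_SCSI_SET_ENDPOINT, &backend) < 0)
 *		perror("VHOST_SCSI_SET_ENDPOINT");
 *
 * In practice (e.g. QEMU's vhost_scsi_start()) this runs after the
 * vrings have been set up, so the access checks above succeed.
 */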
1397
1398 static int
1399 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1400                           struct vhost_scsi_target *t)
1401 {
1402         struct se_portal_group *se_tpg;
1403         struct vhost_scsi_tport *tv_tport;
1404         struct vhost_scsi_tpg *tpg;
1405         struct vhost_virtqueue *vq;
1406         bool match = false;
1407         int index, ret, i;
1408         u8 target;
1409
1410         mutex_lock(&vhost_scsi_mutex);
1411         mutex_lock(&vs->dev.mutex);
1412         /* Verify that ring has been setup correctly. */
1413         for (index = 0; index < vs->dev.nvqs; ++index) {
1414                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1415                         ret = -EFAULT;
1416                         goto err_dev;
1417                 }
1418         }
1419
1420         if (!vs->vs_tpg) {
1421                 ret = 0;
1422                 goto err_dev;
1423         }
1424
1425         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1426                 target = i;
1427                 tpg = vs->vs_tpg[target];
1428                 if (!tpg)
1429                         continue;
1430
1431                 mutex_lock(&tpg->tv_tpg_mutex);
1432                 tv_tport = tpg->tport;
1433                 if (!tv_tport) {
1434                         ret = -ENODEV;
1435                         goto err_tpg;
1436                 }
1437
1438                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1439                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1440                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1441                                 tv_tport->tport_name, tpg->tport_tpgt,
1442                                 t->vhost_wwpn, t->vhost_tpgt);
1443                         ret = -EINVAL;
1444                         goto err_tpg;
1445                 }
1446                 tpg->tv_tpg_vhost_count--;
1447                 tpg->vhost_scsi = NULL;
1448                 vs->vs_tpg[target] = NULL;
1449                 match = true;
1450                 mutex_unlock(&tpg->tv_tpg_mutex);
1451                 /*
1452                  * Release se_tpg->tpg_group.cg_item configfs dependency now
1453                  * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1454                  */
1455                 se_tpg = &tpg->se_tpg;
1456                 target_undepend_item(&se_tpg->tpg_group.cg_item);
1457         }
1458         if (match) {
1459                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1460                         vq = &vs->vqs[i].vq;
1461                         mutex_lock(&vq->mutex);
1462                         vq->private_data = NULL;
1463                         mutex_unlock(&vq->mutex);
1464                 }
1465         }
1466         /*
1467          * Act as synchronize_rcu to make sure any in-flight accesses
1468          * to the old vs->vs_tpg have finished.
1469          */
1470         vhost_scsi_flush(vs);
1471         kfree(vs->vs_tpg);
1472         vs->vs_tpg = NULL;
1473         WARN_ON(vs->vs_events_nr);
1474         mutex_unlock(&vs->dev.mutex);
1475         mutex_unlock(&vhost_scsi_mutex);
1476         return 0;
1477
1478 err_tpg:
1479         mutex_unlock(&tpg->tv_tpg_mutex);
1480 err_dev:
1481         mutex_unlock(&vs->dev.mutex);
1482         mutex_unlock(&vhost_scsi_mutex);
1483         return ret;
1484 }
1485
1486 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1487 {
1488         struct vhost_virtqueue *vq;
1489         int i;
1490
1491         if (features & ~VHOST_SCSI_FEATURES)
1492                 return -EOPNOTSUPP;
1493
1494         mutex_lock(&vs->dev.mutex);
1495         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1496             !vhost_log_access_ok(&vs->dev)) {
1497                 mutex_unlock(&vs->dev.mutex);
1498                 return -EFAULT;
1499         }
1500
1501         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1502                 vq = &vs->vqs[i].vq;
1503                 mutex_lock(&vq->mutex);
1504                 vq->acked_features = features;
1505                 mutex_unlock(&vq->mutex);
1506         }
1507         mutex_unlock(&vs->dev.mutex);
1508         return 0;
1509 }
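
/*
 * A hedged sketch of the matching userspace side (illustrative, not
 * the actual VMM code): read the supported mask, intersect it with the
 * bits the VMM handles, and write the result back.
 *
 *	__u64 features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= ~(1ULL << VHOST_F_LOG_ALL);	/* e.g. no dirty logging */
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *
 * Bits outside VHOST_SCSI_FEATURES are rejected above with -EOPNOTSUPP.
 */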
1510
1511 static int vhost_scsi_open(struct inode *inode, struct file *f)
1512 {
1513         struct vhost_scsi *vs;
1514         struct vhost_virtqueue **vqs;
1515         int r = -ENOMEM, i;
1516
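        /*
         * Prefer a physically contiguous allocation, falling back to
         * vmalloc quietly (hence __GFP_NOWARN) when high-order pages
         * are scarce.
         */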
1517         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1518         if (!vs) {
1519                 vs = vzalloc(sizeof(*vs));
1520                 if (!vs)
1521                         goto err_vs;
1522         }
1523
1524         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1525         if (!vqs)
1526                 goto err_vqs;
1527
1528         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1529         vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
1530
1531         vs->vs_events_nr = 0;
1532         vs->vs_events_missed = false;
1533
1534         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1535         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1536         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1537         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1538         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1539                 vqs[i] = &vs->vqs[i].vq;
1540                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1541         }
1542         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1543
1544         vhost_scsi_init_inflight(vs, NULL);
1545
1546         f->private_data = vs;
1547         return 0;
1548
1549 err_vqs:
1550         kvfree(vs);
1551 err_vs:
1552         return r;
1553 }
1554
1555 static int vhost_scsi_release(struct inode *inode, struct file *f)
1556 {
1557         struct vhost_scsi *vs = f->private_data;
1558         struct vhost_scsi_target t;
1559
1560         mutex_lock(&vs->dev.mutex);
1561         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1562         mutex_unlock(&vs->dev.mutex);
1563         vhost_scsi_clear_endpoint(vs, &t);
1564         vhost_dev_stop(&vs->dev);
1565         vhost_dev_cleanup(&vs->dev, false);
1566         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1567         vhost_scsi_flush(vs);
1568         kfree(vs->dev.vqs);
1569         kvfree(vs);
1570         return 0;
1571 }
1572
1573 static long
1574 vhost_scsi_ioctl(struct file *f,
1575                  unsigned int ioctl,
1576                  unsigned long arg)
1577 {
1578         struct vhost_scsi *vs = f->private_data;
1579         struct vhost_scsi_target backend;
1580         void __user *argp = (void __user *)arg;
1581         u64 __user *featurep = argp;
1582         u32 __user *eventsp = argp;
1583         u32 events_missed;
1584         u64 features;
1585         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1586         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1587
1588         switch (ioctl) {
1589         case VHOST_SCSI_SET_ENDPOINT:
1590                 if (copy_from_user(&backend, argp, sizeof backend))
1591                         return -EFAULT;
1592                 if (backend.reserved != 0)
1593                         return -EOPNOTSUPP;
1594
1595                 return vhost_scsi_set_endpoint(vs, &backend);
1596         case VHOST_SCSI_CLEAR_ENDPOINT:
1597                 if (copy_from_user(&backend, argp, sizeof backend))
1598                         return -EFAULT;
1599                 if (backend.reserved != 0)
1600                         return -EOPNOTSUPP;
1601
1602                 return vhost_scsi_clear_endpoint(vs, &backend);
1603         case VHOST_SCSI_GET_ABI_VERSION:
1604                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1605                         return -EFAULT;
1606                 return 0;
1607         case VHOST_SCSI_SET_EVENTS_MISSED:
1608                 if (get_user(events_missed, eventsp))
1609                         return -EFAULT;
1610                 mutex_lock(&vq->mutex);
1611                 vs->vs_events_missed = events_missed;
1612                 mutex_unlock(&vq->mutex);
1613                 return 0;
1614         case VHOST_SCSI_GET_EVENTS_MISSED:
1615                 mutex_lock(&vq->mutex);
1616                 events_missed = vs->vs_events_missed;
1617                 mutex_unlock(&vq->mutex);
1618                 if (put_user(events_missed, eventsp))
1619                         return -EFAULT;
1620                 return 0;
1621         case VHOST_GET_FEATURES:
1622                 features = VHOST_SCSI_FEATURES;
1623                 if (copy_to_user(featurep, &features, sizeof features))
1624                         return -EFAULT;
1625                 return 0;
1626         case VHOST_SET_FEATURES:
1627                 if (copy_from_user(&features, featurep, sizeof features))
1628                         return -EFAULT;
1629                 return vhost_scsi_set_features(vs, features);
1630         default:
1631                 mutex_lock(&vs->dev.mutex);
1632                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1633                 /* TODO: flush backend after dev ioctl. */
1634                 if (r == -ENOIOCTLCMD)
1635                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1636                 mutex_unlock(&vs->dev.mutex);
1637                 return r;
1638         }
1639 }
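
/*
 * Illustrative only: how userspace might poll and re-arm the
 * "events missed" flag after an event-ring overrun (vhost_fd as in the
 * sketch following vhost_scsi_set_endpoint() above):
 *
 *	__u32 missed;
 *
 *	ioctl(vhost_fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed);
 *	if (missed) {
 *		missed = 0;
 *		ioctl(vhost_fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed);
 *	}
 */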
1640
1641 #ifdef CONFIG_COMPAT
1642 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1643                                 unsigned long arg)
1644 {
1645         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1646 }
1647 #endif
1648
1649 static const struct file_operations vhost_scsi_fops = {
1650         .owner          = THIS_MODULE,
1651         .release        = vhost_scsi_release,
1652         .unlocked_ioctl = vhost_scsi_ioctl,
1653 #ifdef CONFIG_COMPAT
1654         .compat_ioctl   = vhost_scsi_compat_ioctl,
1655 #endif
1656         .open           = vhost_scsi_open,
1657         .llseek         = noop_llseek,
1658 };
1659
1660 static struct miscdevice vhost_scsi_misc = {
1661         .minor  = MISC_DYNAMIC_MINOR,
1662         .name   = "vhost-scsi",
1663         .fops   = &vhost_scsi_fops,
1664 };
1665
1666 static int __init vhost_scsi_register(void)
1667 {
1668         return misc_register(&vhost_scsi_misc);
1669 }
1670
1671 static int vhost_scsi_deregister(void)
1672 {
1673         return misc_deregister(&vhost_scsi_misc);
1674 }
1675
1676 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
1677 {
1678         switch (tport->tport_proto_id) {
1679         case SCSI_PROTOCOL_SAS:
1680                 return "SAS";
1681         case SCSI_PROTOCOL_FCP:
1682                 return "FCP";
1683         case SCSI_PROTOCOL_ISCSI:
1684                 return "iSCSI";
1685         default:
1686                 break;
1687         }
1688
1689         return "Unknown";
1690 }
1691
1692 static void
1693 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
1694                   struct se_lun *lun, bool plug)
1695 {
1697         struct vhost_scsi *vs = tpg->vhost_scsi;
1698         struct vhost_virtqueue *vq;
1699         u32 reason;
1700
1701         if (!vs)
1702                 return;
1703
1704         mutex_lock(&vs->dev.mutex);
1705
1706         if (plug)
1707                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1708         else
1709                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1710
1711         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1712         mutex_lock(&vq->mutex);
1713         if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
1714                 vhost_scsi_send_evt(vs, tpg, lun,
1715                                    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1716         mutex_unlock(&vq->mutex);
1717         mutex_unlock(&vs->dev.mutex);
1718 }
1719
1720 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1721 {
1722         vhost_scsi_do_plug(tpg, lun, true);
1723 }
1724
1725 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
1726 {
1727         vhost_scsi_do_plug(tpg, lun, false);
1728 }
1729
1730 static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
1731                                struct se_lun *lun)
1732 {
1733         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1734                                 struct vhost_scsi_tpg, se_tpg);
1735
1736         mutex_lock(&vhost_scsi_mutex);
1737
1738         mutex_lock(&tpg->tv_tpg_mutex);
1739         tpg->tv_tpg_port_count++;
1740         mutex_unlock(&tpg->tv_tpg_mutex);
1741
1742         vhost_scsi_hotplug(tpg, lun);
1743
1744         mutex_unlock(&vhost_scsi_mutex);
1745
1746         return 0;
1747 }
1748
1749 static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
1750                                   struct se_lun *lun)
1751 {
1752         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1753                                 struct vhost_scsi_tpg, se_tpg);
1754
1755         mutex_lock(&vhost_scsi_mutex);
1756
1757         mutex_lock(&tpg->tv_tpg_mutex);
1758         tpg->tv_tpg_port_count--;
1759         mutex_unlock(&tpg->tv_tpg_mutex);
1760
1761         vhost_scsi_hotunplug(tpg, lun);
1762
1763         mutex_unlock(&vhost_scsi_mutex);
1764 }
1765
1766 static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
1767                                        struct se_session *se_sess)
1768 {
1769         struct vhost_scsi_cmd *tv_cmd;
1770         unsigned int i;
1771
1772         if (!se_sess->sess_cmd_map)
1773                 return;
1774
1775         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1776                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1777
1778                 kfree(tv_cmd->tvc_sgl);
1779                 kfree(tv_cmd->tvc_prot_sgl);
1780                 kfree(tv_cmd->tvc_upages);
1781         }
1782 }
1783
1784 static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
1785         struct se_portal_group *se_tpg,
1786         const char *page,
1787         size_t count)
1788 {
1789         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1790                                 struct vhost_scsi_tpg, se_tpg);
1791         unsigned long val;
1792         int ret = kstrtoul(page, 0, &val);
1793
1794         if (ret) {
1795                 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
1796                 return ret;
1797         }
1798         if (val != 0 && val != 1 && val != 3) {
1799                 pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
1800                 return -EINVAL;
1801         }
1802         tpg->tv_fabric_prot_type = val;
1803
1804         return count;
1805 }
1806
1807 static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type(
1808         struct se_portal_group *se_tpg,
1809         char *page)
1810 {
1811         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1812                                 struct vhost_scsi_tpg, se_tpg);
1813
1814         return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
1815 }
1816 TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);
1817
1818 static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
1819         &vhost_scsi_tpg_attrib_fabric_prot_type.attr,
1820         NULL,
1821 };
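
/*
 * The attribute above surfaces as a configfs file. A shell sketch,
 * assuming the standard configfs mount point and a hypothetical target
 * WWPN:
 *
 *	cd /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *	echo 1 > attrib/fabric_prot_type
 *
 * Per the store handler above, only the values 0, 1 and 3 are accepted.
 */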
1822
1823 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
1824                                 const char *name)
1825 {
1826         struct se_portal_group *se_tpg;
1827         struct se_session *se_sess;
1828         struct vhost_scsi_nexus *tv_nexus;
1829         struct vhost_scsi_cmd *tv_cmd;
1830         unsigned int i;
1831
1832         mutex_lock(&tpg->tv_tpg_mutex);
1833         if (tpg->tpg_nexus) {
1834                 mutex_unlock(&tpg->tv_tpg_mutex);
1835                 pr_debug("tpg->tpg_nexus already exists\n");
1836                 return -EEXIST;
1837         }
1838         se_tpg = &tpg->se_tpg;
1839
1840         tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
1841         if (!tv_nexus) {
1842                 mutex_unlock(&tpg->tv_tpg_mutex);
1843                 pr_err("Unable to allocate struct vhost_scsi_nexus\n");
1844                 return -ENOMEM;
1845         }
1846         /*
1847          * Initialize the struct se_session pointer and set up the tag pool
1848          * for struct vhost_scsi_cmd descriptors.
1849          */
1850         tv_nexus->tvn_se_sess = transport_init_session_tags(
1851                                         VHOST_SCSI_DEFAULT_TAGS,
1852                                         sizeof(struct vhost_scsi_cmd),
1853                                         TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
1854         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1855                 mutex_unlock(&tpg->tv_tpg_mutex);
1856                 kfree(tv_nexus);
1857                 return -ENOMEM;
1858         }
1859         se_sess = tv_nexus->tvn_se_sess;
1860         for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
1861                 tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
1862
1863                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1864                                         VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
1865                 if (!tv_cmd->tvc_sgl) {
1866                         mutex_unlock(&tpg->tv_tpg_mutex);
1867                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1868                         goto out;
1869                 }
1870
1871                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1872                                         VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
1873                 if (!tv_cmd->tvc_upages) {
1874                         mutex_unlock(&tpg->tv_tpg_mutex);
1875                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1876                         goto out;
1877                 }
1878
1879                 tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
1880                                         VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
1881                 if (!tv_cmd->tvc_prot_sgl) {
1882                         mutex_unlock(&tpg->tv_tpg_mutex);
1883                         pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
1884                         goto out;
1885                 }
1886         }
1887         /*
1888          * Since we are running in 'demo mode' this call will generate a
1889          * struct se_node_acl for the vhost_scsi struct se_portal_group with
1890          * the SCSI Initiator port name of the passed configfs group 'name'.
1891          */
1892         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1893                                 se_tpg, (unsigned char *)name);
1894         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1895                 mutex_unlock(&tpg->tv_tpg_mutex);
1896                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1897                                 " for %s\n", name);
1898                 goto out;
1899         }
1900         /*
1901          * Now register the TCM vhost virtual I_T Nexus as active.
1902          */
1903         transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1904                         tv_nexus->tvn_se_sess, tv_nexus);
1905         tpg->tpg_nexus = tv_nexus;
1906
1907         mutex_unlock(&tpg->tv_tpg_mutex);
1908         return 0;
1909
1910 out:
1911         vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1912         transport_free_session(se_sess);
1913         kfree(tv_nexus);
1914         return -ENOMEM;
1915 }
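
/*
 * Back-of-the-envelope cost of the preallocation loop above, assuming
 * a 32-byte struct scatterlist and 8-byte page pointers (both arch-
 * and config-dependent):
 *
 *	VHOST_SCSI_DEFAULT_TAGS (256) descriptors, each with
 *		2048 * 32B data sgls	= 64KB
 *		 512 * 32B prot sgls	= 16KB
 *		2048 * 8B  page ptrs	= 16KB
 *
 * i.e. roughly 256 * 96KB = 24MB per nexus, traded for allocation-free
 * command submission.
 */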
1916
1917 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
1918 {
1919         struct se_session *se_sess;
1920         struct vhost_scsi_nexus *tv_nexus;
1921
1922         mutex_lock(&tpg->tv_tpg_mutex);
1923         tv_nexus = tpg->tpg_nexus;
1924         if (!tv_nexus) {
1925                 mutex_unlock(&tpg->tv_tpg_mutex);
1926                 return -ENODEV;
1927         }
1928
1929         se_sess = tv_nexus->tvn_se_sess;
1930         if (!se_sess) {
1931                 mutex_unlock(&tpg->tv_tpg_mutex);
1932                 return -ENODEV;
1933         }
1934
1935         if (tpg->tv_tpg_port_count != 0) {
1936                 mutex_unlock(&tpg->tv_tpg_mutex);
1937                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1938                         " active TPG port count: %d\n",
1939                         tpg->tv_tpg_port_count);
1940                 return -EBUSY;
1941         }
1942
1943         if (tpg->tv_tpg_vhost_count != 0) {
1944                 mutex_unlock(&tpg->tv_tpg_mutex);
1945                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1946                         " active TPG vhost count: %d\n",
1947                         tpg->tv_tpg_vhost_count);
1948                 return -EBUSY;
1949         }
1950
1951         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1952                 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
1953                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1954
1955         vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
1956         /*
1957          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1958          */
1959         transport_deregister_session(tv_nexus->tvn_se_sess);
1960         tpg->tpg_nexus = NULL;
1961         mutex_unlock(&tpg->tv_tpg_mutex);
1962
1963         kfree(tv_nexus);
1964         return 0;
1965 }
1966
1967 static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
1968                                         char *page)
1969 {
1970         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1971                                 struct vhost_scsi_tpg, se_tpg);
1972         struct vhost_scsi_nexus *tv_nexus;
1973         ssize_t ret;
1974
1975         mutex_lock(&tpg->tv_tpg_mutex);
1976         tv_nexus = tpg->tpg_nexus;
1977         if (!tv_nexus) {
1978                 mutex_unlock(&tpg->tv_tpg_mutex);
1979                 return -ENODEV;
1980         }
1981         ret = snprintf(page, PAGE_SIZE, "%s\n",
1982                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1983         mutex_unlock(&tpg->tv_tpg_mutex);
1984
1985         return ret;
1986 }
1987
1988 static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
1989                                          const char *page,
1990                                          size_t count)
1991 {
1992         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
1993                                 struct vhost_scsi_tpg, se_tpg);
1994         struct vhost_scsi_tport *tport_wwn = tpg->tport;
1995         unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
1996         int ret;
1997         /*
1998          * Shut down the active I_T nexus if 'NULL' is passed.
1999          */
2000         if (!strncmp(page, "NULL", 4)) {
2001                 ret = vhost_scsi_drop_nexus(tpg);
2002                 return (!ret) ? count : ret;
2003         }
2004         /*
2005          * Otherwise make sure the passed virtual Initiator port WWN matches
2006          * the fabric protocol_id set in vhost_scsi_make_tport(), and call
2007          * vhost_scsi_make_nexus().
2008          */
2009         if (strlen(page) >= VHOST_SCSI_NAMELEN) {
2010                 pr_err("Emulated NAA SAS Address: %s, exceeds"
2011                                 " max: %d\n", page, VHOST_SCSI_NAMELEN);
2012                 return -EINVAL;
2013         }
2014         snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
2015
2016         ptr = strstr(i_port, "naa.");
2017         if (ptr) {
2018                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
2019                         pr_err("Passed SAS Initiator Port %s does not"
2020                                 " match target port protoid: %s\n", i_port,
2021                                 vhost_scsi_dump_proto_id(tport_wwn));
2022                         return -EINVAL;
2023                 }
2024                 port_ptr = &i_port[0];
2025                 goto check_newline;
2026         }
2027         ptr = strstr(i_port, "fc.");
2028         if (ptr) {
2029                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
2030                         pr_err("Passed FCP Initiator Port %s does not"
2031                                 " match target port protoid: %s\n", i_port,
2032                                 vhost_scsi_dump_proto_id(tport_wwn));
2033                         return -EINVAL;
2034                 }
2035                 port_ptr = &i_port[3]; /* Skip over "fc." */
2036                 goto check_newline;
2037         }
2038         ptr = strstr(i_port, "iqn.");
2039         if (ptr) {
2040                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
2041                         pr_err("Passed iSCSI Initiator Port %s does not"
2042                                 " match target port protoid: %s\n", i_port,
2043                                 vhost_scsi_dump_proto_id(tport_wwn));
2044                         return -EINVAL;
2045                 }
2046                 port_ptr = &i_port[0];
2047                 goto check_newline;
2048         }
2049         pr_err("Unable to locate prefix for emulated Initiator Port:"
2050                         " %s\n", i_port);
2051         return -EINVAL;
2052         /*
2053          * Clear any trailing newline for the NAA WWN
2054          */
2055 check_newline:
2056         if (i_port[strlen(i_port)-1] == '\n')
2057                 i_port[strlen(i_port)-1] = '\0';
2058
2059         ret = vhost_scsi_make_nexus(tpg, port_ptr);
2060         if (ret < 0)
2061                 return ret;
2062
2063         return count;
2064 }
2065
2066 TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
2067
2068 static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
2069         &vhost_scsi_tpg_nexus.attr,
2070         NULL,
2071 };
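
/*
 * Usage sketch for the "nexus" attribute above (WWNs hypothetical):
 * the initiator prefix must match the protocol chosen when the tport
 * directory was created.
 *
 *	cd /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *	echo naa.60014053226f0388 > nexus	# create the I_T nexus
 *	echo NULL > nexus			# tear it down again
 */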
2072
2073 static struct se_portal_group *
2074 vhost_scsi_make_tpg(struct se_wwn *wwn,
2075                    struct config_group *group,
2076                    const char *name)
2077 {
2078         struct vhost_scsi_tport *tport = container_of(wwn,
2079                         struct vhost_scsi_tport, tport_wwn);
2081         struct vhost_scsi_tpg *tpg;
2082         u16 tpgt;
2083         int ret;
2084
2085         if (strstr(name, "tpgt_") != name)
2086                 return ERR_PTR(-EINVAL);
2087         if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
2088                 return ERR_PTR(-EINVAL);
2089
2090         tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
2091         if (!tpg) {
2092                 pr_err("Unable to allocate struct vhost_scsi_tpg\n");
2093                 return ERR_PTR(-ENOMEM);
2094         }
2095         mutex_init(&tpg->tv_tpg_mutex);
2096         INIT_LIST_HEAD(&tpg->tv_tpg_list);
2097         tpg->tport = tport;
2098         tpg->tport_tpgt = tpgt;
2099
2100         ret = core_tpg_register(&vhost_scsi_ops, wwn, &tpg->se_tpg,
2101                                 tport->tport_proto_id);
2102         if (ret < 0) {
2103                 kfree(tpg);
2104                 return NULL;
2105         }
2106         mutex_lock(&vhost_scsi_mutex);
2107         list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2108         mutex_unlock(&vhost_scsi_mutex);
2109
2110         return &tpg->se_tpg;
2111 }
2112
2113 static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
2114 {
2115         struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2116                                 struct vhost_scsi_tpg, se_tpg);
2117
2118         mutex_lock(&vhost_scsi_mutex);
2119         list_del(&tpg->tv_tpg_list);
2120         mutex_unlock(&vhost_scsi_mutex);
2121         /*
2122          * Release the virtual I_T Nexus for this vhost TPG
2123          */
2124         vhost_scsi_drop_nexus(tpg);
2125         /*
2126          * Deregister the se_tpg from TCM.
2127          */
2128         core_tpg_deregister(se_tpg);
2129         kfree(tpg);
2130 }
2131
2132 static struct se_wwn *
2133 vhost_scsi_make_tport(struct target_fabric_configfs *tf,
2134                      struct config_group *group,
2135                      const char *name)
2136 {
2137         struct vhost_scsi_tport *tport;
2138         char *ptr;
2139         u64 wwpn = 0;
2140         int off = 0;
2141
2142         /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2143                 return ERR_PTR(-EINVAL); */
2144
2145         tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
2146         if (!tport) {
2147                 pr_err("Unable to allocate struct vhost_scsi_tport\n");
2148                 return ERR_PTR(-ENOMEM);
2149         }
2150         tport->tport_wwpn = wwpn;
2151         /*
2152          * Determine the emulated Protocol Identifier and Target Port Name
2153          * based on the incoming configfs directory name.
2154          */
2155         ptr = strstr(name, "naa.");
2156         if (ptr) {
2157                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2158                 goto check_len;
2159         }
2160         ptr = strstr(name, "fc.");
2161         if (ptr) {
2162                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2163                 off = 3; /* Skip over "fc." */
2164                 goto check_len;
2165         }
2166         ptr = strstr(name, "iqn.");
2167         if (ptr) {
2168                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2169                 goto check_len;
2170         }
2171
2172         pr_err("Unable to locate prefix for emulated Target Port:"
2173                         " %s\n", name);
2174         kfree(tport);
2175         return ERR_PTR(-EINVAL);
2176
2177 check_len:
2178         if (strlen(name) >= VHOST_SCSI_NAMELEN) {
2179                 pr_err("Emulated %s Address: %s, exceeds"
2180                         " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
2181                         VHOST_SCSI_NAMELEN);
2182                 kfree(tport);
2183                 return ERR_PTR(-EINVAL);
2184         }
2185         snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
2186
2187         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2188                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
2189
2190         return &tport->tport_wwn;
2191 }
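
/*
 * The parsing above maps directly to configfs directory creation, e.g.
 * (hypothetical WWN):
 *
 *	mkdir -p /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *
 * "naa." selects SCSI_PROTOCOL_SAS, "fc." selects SCSI_PROTOCOL_FCP
 * (with the prefix stripped from the stored name) and "iqn." selects
 * SCSI_PROTOCOL_ISCSI; any other prefix is rejected with -EINVAL.
 */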
2192
2193 static void vhost_scsi_drop_tport(struct se_wwn *wwn)
2194 {
2195         struct vhost_scsi_tport *tport = container_of(wwn,
2196                                 struct vhost_scsi_tport, tport_wwn);
2197
2198         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2199                 " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
2200                 tport->tport_name);
2201
2202         kfree(tport);
2203 }
2204
2205 static ssize_t
2206 vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
2207                                 char *page)
2208 {
2209         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2210                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2211                 utsname()->machine);
2212 }
2213
2214 TF_WWN_ATTR_RO(vhost_scsi, version);
2215
2216 static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2217         &vhost_scsi_wwn_version.attr,
2218         NULL,
2219 };
2220
2221 static struct target_core_fabric_ops vhost_scsi_ops = {
2222         .module                         = THIS_MODULE,
2223         .name                           = "vhost",
2224         .get_fabric_name                = vhost_scsi_get_fabric_name,
2225         .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
2226         .tpg_get_tag                    = vhost_scsi_get_tpgt,
2227         .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
2228         .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
2229         .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
2230         .tpg_check_demo_mode            = vhost_scsi_check_true,
2231         .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
2232         .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
2233         .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
2234         .tpg_check_prot_fabric_only     = vhost_scsi_check_prot_fabric_only,
2235         .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
2236         .release_cmd                    = vhost_scsi_release_cmd,
2237         .check_stop_free                = vhost_scsi_check_stop_free,
2238         .shutdown_session               = vhost_scsi_shutdown_session,
2239         .close_session                  = vhost_scsi_close_session,
2240         .sess_get_index                 = vhost_scsi_sess_get_index,
2241         .sess_get_initiator_sid         = NULL,
2242         .write_pending                  = vhost_scsi_write_pending,
2243         .write_pending_status           = vhost_scsi_write_pending_status,
2244         .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
2245         .get_task_tag                   = vhost_scsi_get_task_tag,
2246         .get_cmd_state                  = vhost_scsi_get_cmd_state,
2247         .queue_data_in                  = vhost_scsi_queue_data_in,
2248         .queue_status                   = vhost_scsi_queue_status,
2249         .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
2250         .aborted_task                   = vhost_scsi_aborted_task,
2251         /*
2252          * Setup callers for generic logic in target_core_fabric_configfs.c
2253          */
2254         .fabric_make_wwn                = vhost_scsi_make_tport,
2255         .fabric_drop_wwn                = vhost_scsi_drop_tport,
2256         .fabric_make_tpg                = vhost_scsi_make_tpg,
2257         .fabric_drop_tpg                = vhost_scsi_drop_tpg,
2258         .fabric_post_link               = vhost_scsi_port_link,
2259         .fabric_pre_unlink              = vhost_scsi_port_unlink,
2260
2261         .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
2262         .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
2263         .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
2264 };
2265
2266 static int __init vhost_scsi_init(void)
2267 {
2268         int ret = -ENOMEM;
2269
2270         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2271                 " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
2272                 utsname()->machine);
2273
2274         /*
2275          * Use our own dedicated workqueue for submitting I/O into
2276          * target core to avoid contention within system_wq.
2277          */
2278         vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
2279         if (!vhost_scsi_workqueue)
2280                 goto out;
2281
2282         ret = vhost_scsi_register();
2283         if (ret < 0)
2284                 goto out_destroy_workqueue;
2285
2286         ret = target_register_template(&vhost_scsi_ops);
2287         if (ret < 0)
2288                 goto out_vhost_scsi_deregister;
2289
2290         return 0;
2291
2292 out_vhost_scsi_deregister:
2293         vhost_scsi_deregister();
2294 out_destroy_workqueue:
2295         destroy_workqueue(vhost_scsi_workqueue);
2296 out:
2297         return ret;
2298 }
2299
2300 static void vhost_scsi_exit(void)
2301 {
2302         target_unregister_template(&vhost_scsi_ops);
2303         vhost_scsi_deregister();
2304         destroy_workqueue(vhost_scsi_workqueue);
2305 }
2306
2307 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2308 MODULE_ALIAS("tcm_vhost");
2309 MODULE_LICENSE("GPL");
2310 module_init(vhost_scsi_init);
2311 module_exit(vhost_scsi_exit);