/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy. When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept. However, if a target was idle, we can
 * choose a queue arbitrarily. In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs. This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
 * could be done locklessly, but we do not do it yet.
 *
 * Decrements of reqs are never concurrent with writes of req_vq: before the
 * decrement reqs will be != 0; after the decrement the virtqueue completion
 * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
	/* This spinlock is never held at the same time as vq_lock. */
	spinlock_t tgt_lock;

	/* Count of outstanding requests. */
	atomic_t reqs;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};
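/*
 * Illustrative sketch (not part of the upstream code): under the policy
 * described above, queue selection for a request boils down to the
 * pseudo-logic below. The real implementation is virtscsi_pick_vq()
 * further down, which additionally takes tgt_lock and wraps the CPU
 * number into the valid queue range instead of using a modulo.
 *
 *	if (atomic_inc_return(&tgt->reqs) > 1)
 *		vq = tgt->req_vq;		(target busy: keep FIFO order)
 *	else					(idle target: steer to this CPU's queue)
 *		vq = &vscsi->req_vqs[smp_processor_id() % vscsi->num_queues];
 */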
/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	/* CPU hotplug notifier */
	struct notifier_block nb;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
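/*
 * Worked example for the bidirectional split above (hypothetical numbers):
 * with an 8 KiB data-in buffer and a reported resid of 10 KiB, scsi_in()
 * gets min(10 KiB, 8 KiB) = 8 KiB and the remaining 2 KiB is charged to
 * the data-out side.
 */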
/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		set_driver_byte(sc, DRIVER_SENSE);
	}

	atomic_dec(&tgt->reqs);
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
static void virtscsi_req_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_cmd *cmd = buf;

	complete_all(cmd->comp);

static void virtscsi_ctrl_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
	struct scatterlist sg;

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = event->reason & 255;
	u8 ascq = event->reason >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed". */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
static void virtscsi_handle_event(struct work_struct *work)
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (event->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_event_node *event_node = buf;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	schedule_work(&event_node->work);

static void virtscsi_event_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
/*
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq : the struct virtqueue we're talking about
 * @cmd : command structure
 * @req_size : size of the request buffer
 * @resp_size : size of the response buffer
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[4], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header. */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer. */
	if (out)
		sgs[out_num++] = out->sgl;

	/* Response header. */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer. */
	if (in)
		sgs[out_num + in_num++] = in->sgl;

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
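/*
 * Editorial note on the layout built above: virtqueue_add_sgs() expects all
 * device-readable elements (the request header and any data-out payload) to
 * come before the device-writable ones (the response header and any data-in
 * payload), which is why sgs[] is filled in exactly that order and why
 * out_num counts only the readable entries.
 */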
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size)
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
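/*
 * Editorial note: the kick is split on purpose. virtqueue_kick_prepare() is
 * cheap and runs under vq_lock, while virtqueue_notify(), which may trap to
 * the host, runs only after the lock is dropped so the lock is not held
 * across a potentially expensive exit.
 */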
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
				 struct virtio_scsi_vq *req_vq,
				 struct scsi_cmnd *sc)
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported? */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	memset(cmd, 0, sizeof(*cmd));

	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0)
		return SCSI_MLQUEUE_HOST_BUSY;
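/*
 * Worked example for the 8-byte LUN field filled in above (illustrative, not
 * extra driver code): for target 3, LUN 5 the request carries
 * lun[] = { 0x01, 0x03, 0x40, 0x05, 0, 0, 0, 0 } -- byte 0 is always 1,
 * byte 1 is the target, and bytes 2-3 hold the LUN with the 0x40 flag
 * selecting flat (format 1) addressing. A LUN above 255, say 300 (0x12c),
 * would encode as 0x41 0x2c.
 */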
static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);

static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
	struct virtio_scsi_vq *vq;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	if (atomic_inc_return(&tgt->reqs) > 1)
		vq = tgt->req_vq;
	else {
		queue_num = smp_processor_id();
		while (unlikely(queue_num >= vscsi->num_queues))
			queue_num -= vscsi->num_queues;

		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
	}

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
				       struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

	return virtscsi_queuecommand(vscsi, req_vq, sc);

static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
	DECLARE_COMPLETION_ONSTACK(comp);

	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)

	mempool_free(cmd, virtscsi_cmd_pool);
static int virtscsi_device_reset(struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);

	memset(cmd, 0, sizeof(*cmd));

	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
static int virtscsi_abort(struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);

	memset(cmd, 0, sizeof(*cmd));

	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
	};
	return virtscsi_tmf(vscsi, cmd);
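/*
 * Editorial note: the .tag above is the same value that virtscsi_queuecommand()
 * placed in the command request (the scsi_cmnd pointer cast to an integer), so
 * the device can match the ABORT TASK TMF against the outstanding command it
 * is meant to cancel.
 */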
static int virtscsi_target_alloc(struct scsi_target *starget)
	struct virtio_scsi_target_state *tgt =
				kmalloc(sizeof(*tgt), GFP_KERNEL);

	spin_lock_init(&tgt->tgt_lock);
	atomic_set(&tgt->reqs, 0);

	starget->hostdata = tgt;

static void virtscsi_target_destroy(struct scsi_target *starget)
	struct virtio_scsi_target_state *tgt = starget->hostdata;

	kfree(tgt);
static struct scsi_host_template virtscsi_host_template_single = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_single,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};
static struct scsi_host_template virtscsi_host_template_multi = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_multi,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
};
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)
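/*
 * Usage sketch for the two accessors above (values are illustrative): read a
 * limit advertised by the device and write a driver limit back, mirroring
 * what virtscsi_init() and virtscsi_probe() do further down.
 *
 *	u32 seg_max = virtscsi_config_get(vdev, seg_max);
 *	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
 */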
static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)

	/* In multiqueue mode, when the number of CPUs is equal
	 * to the number of request queues, we let the queues
	 * be private to one CPU by setting the affinity hint,
	 * to eliminate contention.
	 */
	if ((vscsi->num_queues == 1 ||
	     vscsi->num_queues != num_online_cpus()) && affinity) {
		if (vscsi->affinity_hint_set)
			affinity = false;
		else
			return;
	}

	if (affinity) {
		i = 0;
		for_each_online_cpu(cpu) {
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
			i++;
		}

		vscsi->affinity_hint_set = true;
	} else {
		for (i = 0; i < vscsi->num_queues; i++) {
			if (!vscsi->req_vqs[i].vq)
				continue;

			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
		}

		vscsi->affinity_hint_set = false;
	}
static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
	__virtscsi_set_affinity(vscsi, affinity);

static int virtscsi_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
	struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);

	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD_FROZEN:
		__virtscsi_set_affinity(vscsi, true);
	}
static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;

static void virtscsi_scan(struct virtio_device *vdev)
	struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

	scsi_scan_host(shost);
static void virtscsi_remove_vqs(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_set_affinity(vscsi, false);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_set_affinity(vscsi, true);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	virtscsi_remove_vqs(vdev);
static int virtscsi_probe(struct virtio_device *vdev)
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	u32 sg_elems, num_targets;
	struct scsi_host_template *hostt;

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	if (num_queues == 1)
		hostt = &virtscsi_host_template_single;
	else
		hostt = &virtscsi_host_template_multi;

	shost = scsi_host_alloc(hostt,
		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);

	vscsi->num_queues = num_queues;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	vscsi->nb.notifier_call = &virtscsi_cpu_callback;
	err = register_hotcpu_notifier(&vscsi->nb);
	if (err) {
		pr_err("registering cpu notifier failed\n");
		goto scsi_add_host_failed;
	}

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the
	 * range 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	/*
	 * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
	 * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
	 */
scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);

static void virtscsi_remove(struct virtio_device *vdev)
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

	unregister_hotcpu_notifier(&vscsi->nb);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	unregister_hotcpu_notifier(&vscsi->nb);
	virtscsi_remove_vqs(vdev);

static int virtscsi_restore(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	err = virtscsi_init(vdev, vscsi);

	err = register_hotcpu_notifier(&vscsi->nb);
	if (err)
		vdev->config->del_vqs(vdev);
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
	.scan = virtscsi_scan,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};
static int __init init(void)
	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
	}

	ret = register_virtio_driver(&virtio_scsi_driver);

	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
static void __exit fini(void)
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");