/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/* Helpers to walk either a chained scatterlist or a plain sg array. */
static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
						  unsigned int *count)
{
	return sg_next(sg);
}

static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
					      unsigned int *count)
{
	if (--(*count) == 0)
		return NULL;
	return sg + 1;
}
/* Set up an indirect table of descriptors and add it to the queue. */
static inline int vring_add_indirect(struct vring_virtqueue *vq,
				     struct scatterlist *sgs[],
				     struct scatterlist *(*next)
				       (struct scatterlist *, unsigned int *),
				     unsigned int total_sg,
				     unsigned int total_out,
				     unsigned int total_in,
				     unsigned int out_sgs,
				     unsigned int in_sgs,
				     gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	struct scatterlist *sg;
	int i, n;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg lists into the indirect page */
	i = 0;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			desc[i].flags = VRING_DESC_F_NEXT;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			desc[i].addr = sg_phys(sg);
			desc[i].len = sg->length;
			desc[i].next = i+1;
			i++;
		}
	}
	BUG_ON(i != total_sg);

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->vq.num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				struct scatterlist *(*next)
				  (struct scatterlist *, unsigned int *),
				unsigned int total_out,
				unsigned int total_in,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	unsigned int i, n, avail, uninitialized_var(prev), total_sg;
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	total_sg = total_in + total_out;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free) {
		head = vring_add_indirect(vq, sgs, next, total_sg, total_out,
					  total_in, out_sgs, in_sgs, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	if (vq->vq.num_free < total_sg) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 total_sg, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= total_sg;

	head = i = vq->free_head;
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_out)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = next(sg, &total_in)) {
			vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
			vq->vring.desc[i].addr = sg_phys(sg);
			vq->vring.desc[i].len = sg->length;
			prev = i;
			i = vq->vring.desc[i].next;
		}
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;
}
/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out_num: the number of sg readable by other side
 * @in_num: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct scatterlist *sgs[2];

	sgs[0] = sg;
	sgs[1] = sg + out;

	return virtqueue_add(_vq, sgs, sg_next_arr,
			     out, in, out ? 1 : 0, in ? 1 : 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
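
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * with a contiguous sg array of two readable entries followed by one
 * writable entry could do:
 *
 *	struct scatterlist sg[3];
 *
 *	sg_init_table(sg, 3);
 *	sg_set_buf(&sg[0], hdr, sizeof(*hdr));
 *	sg_set_buf(&sg[1], out_data, out_len);
 *	sg_set_buf(&sg[2], in_data, in_len);
 *	err = virtqueue_add_buf(vq, sg, 2, 1, token, GFP_ATOMIC);
 *
 * hdr, out_data, in_data, their lengths and token are hypothetical
 * driver state; the writable entries must follow the readable ones in
 * the array.
 */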
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_out, total_in;

	/* Count them first. */
	for (i = total_out = total_in = 0; i < out_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_out++;
	}
	for (; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_in++;
	}
	return virtqueue_add(_vq, sgs, sg_next_chained,
			     total_out, total_in, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
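
/*
 * Usage sketch (illustrative, not from the original file): unlike
 * virtqueue_add_buf(), this takes an array of independently terminated
 * scatterlists, so readable and writable parts need not be contiguous:
 *
 *	struct scatterlist hdr_sg, status_sg;
 *	struct scatterlist *sgs[3];
 *
 *	sg_init_one(&hdr_sg, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status_sg, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr_sg;	// readable by device
 *	sgs[1] = data_sgl;	// readable by device
 *	sgs[2] = &status_sg;	// writable by device
 *	err = virtqueue_add_sgs(vq, sgs, 2, 1, req, GFP_ATOMIC);
 *
 * req and data_sgl are hypothetical; each list must be terminated so
 * that sg_next() stops at its end.
 */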
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist sg[], unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
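
/*
 * Usage sketch (illustrative): sending a single lowmem buffer to the
 * device, using the buffer pointer itself as the token:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 *
 * buf and len are hypothetical driver state.
 */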
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: array of scatterlist entries (need not be terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist sg[], unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
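
/*
 * Usage sketch (illustrative): posting an empty receive buffer for the
 * device to fill, as a network driver would for its rx queue:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, rx_buf, rx_len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, rx_buf, GFP_KERNEL);
 *
 * rx_buf and rx_len are hypothetical; the device writes into the
 * buffer and reports the length written via the used ring.
 */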
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
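
/*
 * Usage sketch (illustrative): because the avail index is only
 * published at kick time, a driver can batch several adds under its
 * lock and pay for a single notification:
 *
 *	spin_lock(&vq_lock);
 *	for (i = 0; i < n; i++)
 *		virtqueue_add_outbuf(vq, &sg[i], 1, tok[i], GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&vq_lock);
 *	if (kick)
 *		virtqueue_notify(vq);
 *
 * vq_lock, sg, tok and n are hypothetical; note that the notify itself
 * may happen outside the lock.
 */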
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the driver wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
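
/*
 * Usage sketch (illustrative): draining all completions, typically
 * from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 *
 * complete_request() is a hypothetical per-driver handler; len is the
 * number of bytes the device wrote into the buffer.
 */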
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
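
/*
 * Usage sketch (illustrative): the prepare/poll pair lets a driver
 * busy-wait for used buffers without taking an interrupt per buffer:
 *
 *	unsigned state = virtqueue_enable_cb_prepare(vq);
 *
 *	while (!virtqueue_poll(vq, state))
 *		cpu_relax();
 *	virtqueue_disable_cb(vq);
 *	// ...drain with virtqueue_get_buf()...
 *
 * A real driver would bound the spin; this only shows how the opaque
 * state from _prepare feeds into _poll.
 */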
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
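
/*
 * Usage sketch (illustrative): the classic race-free pattern in a
 * completion handler is to re-check after re-enabling callbacks:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 *
 * handle() is hypothetical.  If enable_cb() returns false, buffers
 * arrived while callbacks were off, so we loop and drain those too.
 */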
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
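
/*
 * Usage sketch (illustrative): on device teardown, after resetting the
 * device so the queue is no longer active, a driver reclaims buffers
 * it posted but the device never used:
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 *
 * This assumes the tokens were kmalloc'ed buffers; drivers with richer
 * request structures would free those instead.
 */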
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
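
/*
 * Usage sketch (illustrative): a transport allocates page-aligned,
 * zeroed memory for the ring layout and wires in its own notify hook:
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				get_order(vring_size(num, PAGE_SIZE)));
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, PAGE_SIZE,
 *				vdev, true, pages, my_notify,
 *				my_callback, "requests");
 *
 * my_notify() and my_callback() are hypothetical transport hooks; num
 * must be a power of two, as checked above.
 */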
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
MODULE_LICENSE("GPL");