/*
 * This is the Launcher code, a simple program which lays out the "physical"
 * memory for the new Guest by mapping the kernel image and the virtual
 * devices, then opens /dev/lguest to tell the kernel about the Guest and
 * control it.
 */
7 #define _LARGEFILE64_SOURCE
17 #include <sys/param.h>
18 #include <sys/types.h>
21 #include <sys/eventfd.h>
26 #include <sys/socket.h>
27 #include <sys/ioctl.h>
30 #include <netinet/in.h>
32 #include <linux/sockios.h>
33 #include <linux/if_tun.h>
45 #include <linux/pci_regs.h>
47 #ifndef VIRTIO_F_ANY_LAYOUT
48 #define VIRTIO_F_ANY_LAYOUT 27
52 * We can ignore the 43 include files we need for this program, but I do want
53 * to draw attention to the use of kernel-style types.
55 * As Linus said, "C is a Spartan language, and so should your naming be." I
56 * like these abbreviations, so we define them here. Note that u64 is always
57 * unsigned long long, which works on all Linux systems: this means that we can
58 * use %llu in printf for any u64.
60 typedef unsigned long long u64;
66 #define VIRTIO_CONFIG_NO_LEGACY
67 #define VIRTIO_PCI_NO_LEGACY
68 #define VIRTIO_BLK_NO_LEGACY
70 /* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */
71 #include "../../include/uapi/linux/virtio_config.h"
72 #include "../../include/uapi/linux/virtio_net.h"
73 #include "../../include/uapi/linux/virtio_blk.h"
74 #include <linux/virtio_console.h>
75 #include "../../include/uapi/linux/virtio_rng.h"
76 #include <linux/virtio_ring.h>
77 #include "../../include/uapi/linux/virtio_pci.h"
78 #include <asm/bootparam.h>
79 #include "../../include/linux/lguest_launcher.h"
81 #define BRIDGE_PFX "bridge:"
83 #define SIOCBRADDIF 0x89a2 /* add interface to bridge */
85 /* We can have up to 256 pages for devices. */
86 #define DEVICE_PAGES 256
87 /* This will occupy 3 pages: it must be a power of 2. */
88 #define VIRTQUEUE_NUM 256
91 * verbose is both a global flag and a macro. The C preprocessor allows
92 * this, and although I wouldn't recommend it, it works quite nicely here.
95 #define verbose(args...) \
96 do { if (verbose) printf(args); } while(0)
99 /* The pointer to the start of guest memory. */
100 static void *guest_base;
101 /* The maximum guest physical address allowed, and maximum possible. */
102 static unsigned long guest_limit, guest_max, guest_mmio;
103 /* The /dev/lguest file descriptor. */
104 static int lguest_fd;
106 /* a per-cpu variable indicating whose vcpu is currently running */
107 static unsigned int __thread cpu_id;
109 /* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */
110 #define MAX_PCI_DEVICES 32
112 /* This is our list of devices. */
114 /* Counter to assign interrupt numbers. */
115 unsigned int next_irq;
117 /* Counter to print out convenient device numbers. */
118 unsigned int device_num;
121 struct device *pci[MAX_PCI_DEVICES];
124 /* The list of Guest devices, based on command line arguments. */
125 static struct device_list devices;
127 struct virtio_pci_cfg_cap {
128 struct virtio_pci_cap cap;
129 u32 window; /* Data for BAR access. */
132 struct virtio_pci_mmio {
133 struct virtio_pci_common_cfg cfg;
137 /* Device-specific configuration follows this. */
140 /* This is the layout (little-endian) of the PCI config space. */
142 u16 vendor_id, device_id;
144 u8 revid, prog_if, subclass, class;
145 u8 cacheline_size, lat_timer, header_type, bist;
148 u16 subsystem_vendor_id, subsystem_device_id;
149 u32 expansion_rom_addr;
150 u8 capabilities, reserved1[3];
152 u8 irq_line, irq_pin, min_grant, max_latency;
154 /* Now, this is the linked capability list. */
155 struct virtio_pci_cap common;
156 struct virtio_pci_notify_cap notify;
157 struct virtio_pci_cap isr;
158 struct virtio_pci_cap device;
159 /* FIXME: Implement this! */
160 struct virtio_pci_cfg_cap cfg_access;
163 /* The device structure describes a single device. */
165 /* The name of this device, for --verbose. */
168 /* Any queues attached to this device */
169 struct virtqueue *vq;
171 /* Is it operational */
174 /* PCI configuration */
176 struct pci_config config;
177 u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
180 /* Features we offer, and those accepted. */
181 u64 features, features_accepted;
183 /* Device-specific config hangs off the end of this. */
184 struct virtio_pci_mmio *mmio;
186 /* PCI MMIO resources (all in BAR0) */
190 /* Device-specific data. */
194 /* The virtqueue structure describes a queue attached to a device. */
196 struct virtqueue *next;
198 /* Which device owns me. */
201 /* The actual ring of buffers. */
204 /* The information about this virtqueue (we only use queue_size on) */
205 struct virtio_pci_common_cfg pci_config;
207 /* Last available index we saw. */
210 /* How many are used since we sent last irq? */
211 unsigned int pending_used;
213 /* Eventfd where Guest notifications arrive. */
216 /* Function for the thread which is servicing this virtqueue. */
217 void (*service)(struct virtqueue *vq);
221 /* Remember the arguments to the program so we can "reboot" */
222 static char **main_args;
224 /* The original tty settings to restore on exit. */
225 static struct termios orig_term;
228 * We have to be careful with barriers: our devices are all run in separate
229 * threads and so we need to make sure that changes visible to the Guest happen
232 #define wmb() __asm__ __volatile__("" : : : "memory")
233 #define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
234 #define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
236 /* Wrapper for the last available index. Makes it easier to change. */
237 #define lg_last_avail(vq) ((vq)->last_avail_idx)
240 * The virtio configuration space is defined to be little-endian. x86 is
241 * little-endian too, but it's nice to be explicit so we have these helpers.
243 #define cpu_to_le16(v16) (v16)
244 #define cpu_to_le32(v32) (v32)
245 #define cpu_to_le64(v64) (v64)
246 #define le16_to_cpu(v16) (v16)
247 #define le32_to_cpu(v32) (v32)
248 #define le64_to_cpu(v64) (v64)
250 /* Is this iovec empty? */
/*
 * Is this iovec empty?  True only when every element's iov_len is zero.
 */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
	for (i = 0; i < num_iov; i++)
261 /* Take len bytes from the front of this iovec. */
/*
 * Take len bytes from the front of this iovec: copy them out to dest and
 * shrink/advance the iovec elements in place.  Dies if the iovec holds
 * fewer than len bytes.
 * NOTE(review): console_output() passes dest == NULL — confirm the copy
 * is guarded on dest in the elided lines.
 */
static void iov_consume(struct iovec iov[], unsigned num_iov,
			void *dest, unsigned len)
	for (i = 0; i < num_iov; i++) {
		/* Use no more of this element than we still need. */
		used = iov[i].iov_len < len ? iov[i].iov_len : len;
			memcpy(dest, iov[i].iov_base, used);
		/* Advance this element past the consumed bytes. */
		iov[i].iov_base += used;
		iov[i].iov_len -= used;
	/* Getting here means the buffers ran out before len did. */
	errx(1, "iovec too short!");
284 * The Launcher code itself takes us out into userspace, that scary place where
285 * pointers run wild and free! Unfortunately, like most userspace programs,
286 * it's quite boring (which is why everyone likes to hack on the kernel!).
287 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
288 * you through this section. Or, maybe not.
290 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
291 * memory and stores it in "guest_base". In other words, Guest physical ==
292 * Launcher virtual with an offset.
294 * This can be tough to get your head around, but usually it just means that we
295 * use these trivial conversion functions when the Guest gives us its
296 * "physical" addresses:
298 static void *from_guest_phys(unsigned long addr)
300 return guest_base + addr;
303 static unsigned long to_guest_phys(const void *addr)
305 return (addr - guest_base);
309 * Loading the Kernel.
311 * We start with couple of simple helper routines. open_or_die() avoids
312 * error-checking code cluttering the callers:
/*
 * Open "name" with the given flags, exiting with an error message on
 * failure.  Keeps error-checking clutter out of the callers.
 */
static int open_or_die(const char *name, int flags)
{
	int fd = open(name, flags);

	if (fd < 0)
		err(1, "Failed to open %s", name);
	return fd;
}
322 /* map_zeroed_pages() takes a number of pages. */
/*
 * map_zeroed_pages() takes a number of pages and returns a zeroed,
 * read-write mapping of that size, bracketed by PROT_NONE guard pages.
 */
static void *map_zeroed_pages(unsigned int num)
	int fd = open_or_die("/dev/zero", O_RDONLY);
	/*
	 * We use a private mapping (ie. if we write to the page, it will be
	 * copied). We allocate an extra two pages PROT_NONE to act as guard
	 * pages against read/write attempts that exceed allocated space.
	 */
	addr = mmap(NULL, getpagesize() * (num+2),
		    PROT_NONE, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);
	/* Open up the inner num pages for reading and writing. */
	if (mprotect(addr + getpagesize(), getpagesize() * num,
		     PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages failed", num);
	/*
	 * One neat mmap feature is that you can close the fd, and it
	 * keeps the mapping alive.
	 */
	/* Return address after PROT_NONE page */
	return addr + getpagesize();
353 /* Get some bytes which won't be mapped into the guest. */
/*
 * Reserve "size" bytes of MMIO address space above guest memory; the
 * base returned is the previous guest_mmio watermark.
 */
static unsigned long get_mmio_region(size_t size)
	unsigned long addr = guest_mmio;
	/* Size has to be a power of 2 (and multiple of 16) */
	/* Round size up to the next power of 2. */
	for (i = 1; i < size; i <<= 1);
371 * This routine is used to load the kernel or initrd. It tries mmap, but if
372 * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
373 * it falls back to reading the memory in.
/*
 * Map len bytes of fd (starting at offset) at the given address; if mmap
 * refuses (e.g. unaligned file offset), fall back to reading it in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
	/*
	 * We map writable even though some segments are marked read-only.
	 * The kernel really wants to be writable: it patches its own
	 * MAP_PRIVATE means that the page won't be copied until a write is
	 * done to it. This allows us to share untouched memory between
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
	/* pread does a seek and a read in one shot: saves a few lines. */
	r = pread(fd, addr, len, offset);
	err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
399 * This routine takes an open vmlinux image, which is in ELF, and maps it into
400 * the Guest memory. ELF = Embedded Linking Format, which is the format used
401 * by all modern binaries on Linux including the kernel.
403 * The ELF headers give *two* addresses: a physical address, and a virtual
404 * address. We use the physical address; the Guest will map itself to the
407 * We return the starting address.
/*
 * Take an open vmlinux ELF image and map its loadable segments into Guest
 * memory at their physical addresses.  Returns the ELF entry point.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
	Elf32_Phdr phdr[ehdr->e_phnum];
	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");
	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */
	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");
	/*
	 * Try all the headers: there are usually only three. A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);
		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
458 * A bzImage, unlike an ELF file, is not meant to be loaded. You're supposed
459 * to jump into it and it will unpack itself. We used to have to perform some
460 * hairy magic because the unpacking code scared me.
462 * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
463 * a small patch to jump over the tricky bits in the Guest, so now we just read
464 * the funky header so we know where in the file to load, and away we go!
/*
 * Load a self-extracting bzImage: verify the "HdrS" boot-header magic,
 * skip the real-mode setup sectors, copy the rest to 1M, and return the
 * 32-bit entry point from the header.
 */
static unsigned long load_bzimage(int fd)
	struct boot_params boot;
	/* Modern bzImages get loaded at 1M. */
	void *p = from_guest_phys(0x100000);
	/*
	 * Go back to the start of the file and read the header. It should be
	 * a Linux boot header (see Documentation/x86/boot.txt)
	 */
	lseek(fd, 0, SEEK_SET);
	/* NOTE(review): read() return value is unchecked here; a short read
	 * would be caught by the magic check below, but only by luck. */
	read(fd, &boot, sizeof(boot));
	/* Inside the setup_hdr, we expect the magic "HdrS" */
	if (memcmp(&boot.hdr.header, "HdrS", 4) != 0)
		errx(1, "This doesn't look like a bzImage to me");
	/* Skip over the extra sectors of the header. */
	lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET);
	/* Now read everything into memory, in nice big chunks. */
	while ((r = read(fd, p, 65536)) > 0)
	/* Finally, code32_start tells us where to enter the kernel. */
	return boot.hdr.code32_start;
496 * Loading the kernel is easy when it's a "vmlinux", but most kernels
497 * come wrapped up in the self-decompressing "bzImage" format. With a little
498 * work, we can load those, too.
/*
 * Load a kernel image from fd: dispatch on the magic bytes to either the
 * plain-ELF loader or the bzImage loader.  Returns the entry address.
 */
static unsigned long load_kernel(int fd)
	/* Read in the first few bytes. */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		err(1, "Reading kernel");
	/* If it's an ELF file, it starts with "\177ELF" */
	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
		return map_elf(fd, &hdr);
	/* Otherwise we assume it's a bzImage, and try to load it. */
	return load_bzimage(fd);
517 * This is a trivial little helper to align pages. Andi Kleen hated it because
518 * it calls getpagesize() twice: "it's dumb code."
520 * Kernel guys get really het up about optimization, even when it's not
521 * necessary. I leave this code as a reaction against that.
/*
 * Round addr up to the next page boundary (a no-op if it is already
 * aligned).
 */
static inline unsigned long page_align(unsigned long addr)
{
	/* Add upwards and truncate downwards. */
	unsigned long page = getpagesize();

	return (addr + page - 1) & ~(page - 1);
}
530 * An "initial ram disk" is a disk image loaded into memory along with the
531 * kernel which the kernel can use to boot from without needing any drivers.
532 * Most distributions now use this as standard: the initrd contains the code to
533 * load the appropriate driver modules for the current machine.
535 * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
536 * kernels. He sent me this (and tells me when I break it).
/*
 * Map the initrd image "name" at the top of Guest memory (which ends at
 * "mem"), rounding the mapping size up to a page.  Returns the initrd size.
 */
static unsigned long load_initrd(const char *name, unsigned long mem)
	ifd = open_or_die(name, O_RDONLY);
	/* fstat() is needed to get the file size. */
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);
	/*
	 * We map the initrd at the top of memory, but mmap wants it to be
	 * page-aligned, so we round the size up for that.
	 */
	len = page_align(st.st_size);
	map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
	/*
	 * Once a file is mapped, you can close the file descriptor. It's a
	 * little odd, but quite useful.
	 */
	verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);
	/* We return the initrd size. */
568 * Simple routine to roll all the commandline arguments together with spaces
/*
 * Roll all the NULL-terminated argument strings into one space-separated
 * string in dst (caller supplies a big-enough buffer).
 */
static void concat(char *dst, char *args[])
	unsigned int i, len = 0;

	for (i = 0; args[i]; i++) {
			/* Separate arguments with a single space. */
			strcat(dst+len, " ");
		strcpy(dst+len, args[i]);
		len += strlen(args[i]);
	/* In case it's empty. */
588 * This is where we actually tell the kernel to initialize the Guest. We
589 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
590 * the base of Guest "physical" memory, the top physical page to allow and the
591 * entry point for the Guest.
/*
 * Tell the kernel to initialize the Guest: write the LHREQ_INITIALIZE
 * arguments (memory base, top page, entry point, MMIO start page) to a
 * freshly-opened /dev/lguest, which is kept in the global lguest_fd.
 */
static void tell_kernel(unsigned long start)
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / getpagesize(), start,
				 (guest_mmio+getpagesize()-1) / getpagesize() };
	verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
		guest_base, guest_base + guest_limit,
		guest_limit, guest_mmio);
	lguest_fd = open_or_die("/dev/lguest", O_RDWR);
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
611 * When the Guest gives us a buffer, it sends an array of addresses and sizes.
612 * We need to make sure it's not trying to reach into the Launcher itself, so
613 * we have a convenient routine which checks it and exits with an error message
614 * if something funny is going on:
/*
 * Bounds-check a Guest-supplied address range: dies (with the caller's
 * line number, via the check_pointer() macro) if the range escapes guest
 * memory or wraps.  Returns the Launcher pointer for the address.
 */
static void *_check_pointer(unsigned long addr, unsigned int size,
	/*
	 * Check if the requested address and size exceeds the allocated memory,
	 * or addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe.
	 */
	return from_guest_phys(addr);
631 /* A macro which transparently hands the line number to the real function. */
632 #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
635 * Each buffer in the virtqueues is actually a chain of descriptors. This
636 * function returns the next descriptor in the chain, or vq->vring.num if we're
/*
 * Each virtqueue buffer is a chain of descriptors: return the next
 * descriptor index in the chain, or "max" when the chain ends.  Dies if
 * the Guest supplies an out-of-range next index.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
	/* Check they're not leading us off end of descriptors. */
	/* Make sure compiler knows to grab that: we don't want it changing! */
		errx(1, "Desc next is %u", next);
660 * This actually sends the interrupt for this virtqueue, if we've used a
/*
 * Send this virtqueue's interrupt to the Guest, if anything has been
 * used since the last interrupt and the Guest hasn't suppressed them.
 */
static void trigger_irq(struct virtqueue *vq)
	unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
	vq->pending_used = 0;

	/* If they don't want an interrupt, don't send one... */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
	/* Set isr to 1 (queue interrupt pending) */
	vq->dev->mmio->isr = 0x1;

	/* Send the Guest an interrupt to tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->dev->config.irq_line);
686 * This looks in the virtqueue for the first available buffer, and converts
687 * it to an iovec for convenient access. Since descriptors consist of some
688 * number of output then some number of input descriptors, it's actually two
689 * iovecs, but we pack them into one and note how many of each there were.
691 * This function waits if necessary, and returns the descriptor number found.
/*
 * Find the first available buffer in the virtqueue and convert it into an
 * iovec: output descriptors first, then inputs, with the counts returned
 * through *out_num and *in_num.  Sleeps on the eventfd when the ring is
 * empty.  Returns the head descriptor index.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 unsigned int *out_num, unsigned int *in_num)
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");
		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);
	/*
	 * Make sure we read the descriptor number *after* we read the ring
	 * update; don't let the cpu or compiler change the order.
	 */
	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);
	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	desc = vq->vring.desc;
	/*
	 * We have to read the descriptor after we read the descriptor number,
	 * but there's a data dependency there so the CPU shouldn't reorder
	 * that: no rmb() required.
	 */
	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");
		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
	/* Grab the first descriptor, and check it's OK. */
	iov[*out_num + *in_num].iov_len = desc[i].len;
	iov[*out_num + *in_num].iov_base
		= check_pointer(desc[i].addr, desc[i].len);
	/* If this is an input descriptor, increment that count. */
	if (desc[i].flags & VRING_DESC_F_WRITE)
	/*
	 * If it's an output descriptor, they're all supposed
	 * to come before any input descriptors.
	 */
		errx(1, "Descriptor has out after in");
	/* If we've got too many, that implies a descriptor loop. */
	if (*out_num + *in_num > max)
		errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);
807 * After we've used one of their buffers, we tell the Guest about it. Sometime
808 * later we'll want to send them an interrupt using trigger_irq(); note that
809 * wait_for_vq_desc() does that for us if it has to wait.
/*
 * Tell the Guest we've finished with one of its buffers: publish the
 * head/len pair in the used ring, then bump the used index.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers. Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	/* Make sure buffer is written before we update index. */
	vq->vring.used->idx++;
828 /* And here's the combo meal deal. Supersize me! */
/* Mark the buffer used and (as the name says) raise the interrupt too. */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
	add_used(vq, head, len);
838 * We associate some data with the console for our exit hack.
840 struct console_abort {
841 /* How many times have they hit ^C? */
843 /* When did they start? */
844 struct timeval start;
847 /* This is the routine which handles console input (ie. stdin). */
/*
 * Service routine for the console input queue: read stdin into a Guest
 * buffer, and implement the three-^C-within-a-second exit hack using the
 * console_abort state hanging off the device's priv pointer.
 */
static void console_input(struct virtqueue *vq)
	unsigned int head, in_num, out_num;
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* Make sure there's a descriptor available. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
		errx(1, "Output buffers in console in queue?");

	/* Read into it. This is where we usually wait. */
	len = readv(STDIN_FILENO, iov, in_num);
	/* Ran out of input? */
		warnx("Failed to get console input, ignoring console.");
		/*
		 * For simplicity, dying threads kill the whole Launcher. So
		 * just sleep here instead of exiting the thread.
		 */
	/* Tell the Guest we used a buffer. */
	add_used_and_trigger(vq, head, len);

	/*
	 * Three ^C within one second? Exit.
	 *
	 * This is such a hack, but works surprisingly well. Each ^C has to
	 * be in a buffer by itself, so they can't be too fast. But we check
	 * that we get three within about a second, so they can't be too
	 * slow, either.
	 */
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
902 /* This is the routine which handles console output (ie. stdout). */
/*
 * Service routine for the console output queue: write the Guest's buffer
 * to stdout, looping because writev() may do partial writes.
 */
static void console_output(struct virtqueue *vq)
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here, for the Guest to give us something. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
		errx(1, "Input buffers in console output queue?");

	/* writev can return a partial write, so we loop here. */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
			warn("Write to stdout gave %i (%d)", len, errno);
		/* Drop what we wrote from the front of the iovec. */
		iov_consume(iov, out, NULL, len);
	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
933 * Handling output for network is also simple: we get all the output buffers
934 * and write them to /dev/net/tun.
/*
 * Service routine for the network output queue: send the Guest's packet
 * straight through to the tun device (same header format).
 */
static void net_output(struct virtqueue *vq)
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
		errx(1, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun. It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		warnx("Write to tun failed (%d)?", errno);
	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
965 * Handling network input is a bit trickier, because I've tried to optimize it.
967 * First we have a helper routine which tells is if from this file descriptor
968 * (ie. the /dev/net/tun device) will block:
/*
 * Would a read from this fd block right now?  Uses a zero-timeout
 * select() so it never waits itself.
 */
static bool will_block(int fd)
	struct timeval zero = { 0, 0 };
	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
980 * This handles packets coming in from the tun device to our Guest. Like all
981 * service routines, it gets called again as soon as it returns, so you don't
982 * see a while(1) loop here.
/*
 * Service routine for the network input queue: read a packet from the tun
 * device into a Guest buffer.  Batches interrupts: only flushes pending
 * used buffers when the tun fd is about to block.
 */
static void net_input(struct virtqueue *vq)
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

	/*
	 * Get a descriptor to write an incoming packet into. This will also
	 * send an interrupt if they're out of descriptors.
	 */
	head = wait_for_vq_desc(vq, iov, &out, &in);
		errx(1, "Output buffers in net input queue?");

	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt for the packets consumed so far.
	 */
	if (vq->pending_used && will_block(net_info->tunfd))

	/*
	 * Read in the packet. This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
	len = readv(net_info->tunfd, iov, in);
		warn("Failed to read from tun (%d).", errno);
	/*
	 * Mark that packet buffer as used, but don't interrupt here. We want
	 * to wait until we've done as much work as we can.
	 */
	add_used(vq, head, len);
1022 /* This is the helper to create threads: run the service routine in a loop. */
/* Thread entry point: runs the virtqueue's service routine in a loop. */
static int do_thread(void *_vq)
	struct virtqueue *vq = _vq;
1033 * When a child dies, we kill our entire process group with SIGTERM. This
1034 * also has the side effect that the shell restores the console for us!
1036 static void kill_launcher(int signal)
/*
 * Reset a device to its pristine state: forget negotiated features and
 * tear down every virtqueue service thread.
 */
static void reset_device(struct device *dev)
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	dev->features_accepted = 0;

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/* Get rid of the virtqueue threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			/* -1 marks the queue as having no thread. */
			vq->thread = (pid_t)-1;
	dev->running = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
/*
 * Exit-time cleanup: walk PCI slots 1..31 (slot 0 is the dummy host
 * bridge) and restore the original terminal settings.
 */
static void cleanup_devices(void)
	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];
	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
1084 * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.
/*
 * Generic LHCALL_NOTIFY handler: the address names a nul-terminated
 * string in Guest memory, which we echo to stdout (early console).
 */
static void handle_output(unsigned long addr)
	/*
	 * Early console write is done using notify on a nul-terminated string
	 * in Guest memory. It's also great for hacking debugging messages
	 * into a Guest.
	 */
	if (addr >= guest_limit)
		errx(1, "Bad NOTIFY %#lx", addr);
	/* strnlen is bounded so a string without a nul can't run off the
	 * end of guest memory. */
	write(STDOUT_FILENO, from_guest_phys(addr),
	      strnlen(from_guest_phys(addr), guest_limit - addr));
1101 * We do PCI. This is mainly done to let us test the kernel virtio PCI
1105 /* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
1106 static struct device pci_host_bridge;
1108 static void init_pci_host_bridge(void)
1110 pci_host_bridge.name = "PCI Host Bridge";
1111 pci_host_bridge.config.class = 0x06; /* bridge */
1112 pci_host_bridge.config.subclass = 0; /* host bridge */
1113 devices.pci[0] = &pci_host_bridge;
1116 /* The IO ports used to read the PCI config space. */
1117 #define PCI_CONFIG_ADDR 0xCF8
1118 #define PCI_CONFIG_DATA 0xCFC
1121 * Not really portable, but does help readability: this is what the Guest
1122 * writes to the PCI_CONFIG_ADDR IO port.
1124 union pci_config_addr {
1128 unsigned funcnum: 3;
1131 unsigned reserved: 7;
1132 unsigned enabled : 1;
1138 * We cache what they wrote to the address port, so we know what they're
1139 * talking about when they access the data port.
1141 static union pci_config_addr pci_config_addr;
1143 static struct device *find_pci_device(unsigned int index)
1145 return devices.pci[index];
1148 /* PCI can do 1, 2 and 4 byte reads; we handle that here. */
1149 static void ioread(u16 off, u32 v, u32 mask, u32 *val)
1152 assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
1153 *val = (v >> (off * 8)) & mask;
1156 /* PCI can do 1, 2 and 4 byte writes; we handle that here. */
1157 static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
1160 assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
1161 *dst &= ~(mask << (off * 8));
1162 *dst |= (v & mask) << (off * 8);
1166 * Where PCI_CONFIG_DATA accesses depends on the previous write to
/*
 * Decode the cached PCI_CONFIG_ADDR value into the addressed device and a
 * register index (stored in *reg).  Disabled accesses, non-zero function
 * or bus numbers, and out-of-range offsets are rejected first.
 */
static struct device *dev_and_reg(u32 *reg)
	if (!pci_config_addr.bits.enabled)
	if (pci_config_addr.bits.funcnum != 0)
	if (pci_config_addr.bits.busnum != 0)
	if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
	*reg = pci_config_addr.bits.offset;
	return find_pci_device(pci_config_addr.bits.devnum);
1187 /* Is this accessing the PCI config address port?. */
1188 static bool is_pci_addr_port(u16 port)
1190 return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
/*
 * The Guest wrote to the PCI config address port: cache the value in
 * pci_config_addr and log the decoded bus/dev/fn/register for --verbose.
 */
static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
	iowrite(port - PCI_CONFIG_ADDR, val, mask,
		&pci_config_addr.val);
	verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
		pci_config_addr.bits.enabled ? "" : " DISABLED",
		pci_config_addr.bits.busnum,
		pci_config_addr.bits.devnum,
		pci_config_addr.bits.funcnum,
		pci_config_addr.bits.offset);
1207 static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
1209 ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
1212 /* Is this accessing the PCI config data port?. */
1213 static bool is_pci_data_port(u16 port)
1215 return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
1218 static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
1221 struct device *d = dev_and_reg(®);
1223 /* Complain if they don't belong to a device. */
1227 /* They can do 1 byte writes, etc. */
1228 portoff = port - PCI_CONFIG_DATA;
1231 * PCI uses a weird way to determine the BAR size: the OS
1232 * writes all 1's, and sees which ones stick.
1234 if (&d->config_words[reg] == &d->config.bar[0]) {
1237 iowrite(portoff, val, mask, &d->config.bar[0]);
1238 for (i = 0; (1 << i) < d->mmio_size; i++)
1239 d->config.bar[0] &= ~(1 << i);
1241 } else if ((&d->config_words[reg] > &d->config.bar[0]
1242 && &d->config_words[reg] <= &d->config.bar[6])
1243 || &d->config_words[reg] == &d->config.expansion_rom_addr) {
1244 /* Allow writing to any other BAR, or expansion ROM */
1245 iowrite(portoff, val, mask, &d->config_words[reg]);
1247 /* We let them overide latency timer and cacheline size */
1248 } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
1249 /* Only let them change the first two fields. */
1250 if (mask == 0xFFFFFFFF)
1252 iowrite(portoff, val, mask, &d->config_words[reg]);
1254 } else if (&d->config_words[reg] == (void *)&d->config.command
1255 && mask == 0xFFFF) {
1256 /* Ignore command writes. */
1260 /* Complain about other writes. */
1264 static void pci_data_ioread(u16 port, u32 mask, u32 *val)
1267 struct device *d = dev_and_reg(®);
1271 ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
1275 * This is where we emulate a handful of Guest instructions. It's ugly
1276 * and we used to do it in the kernel but it grew over time.
1280 * We use the ptrace syscall's pt_regs struct to talk about registers
1281 * to lguest: these macros convert the names to the offsets.
/* Access a Guest register by its field name in struct user_regs_struct. */
1283 #define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
1284 #define setreg(name, val) \
1285 setreg_off(offsetof(struct user_regs_struct, name), (val))
1287 static u32 getreg_off(size_t offset)
1290 unsigned long args[] = { LHREQ_GETREG, offset };
1292 if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
1293 err(1, "Getting register %u", offset);
1294 if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
1295 err(1, "Reading register %u", offset);
1300 static void setreg_off(size_t offset, u32 val)
1302 unsigned long args[] = { LHREQ_SETREG, offset, val };
1304 if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
1305 err(1, "Setting register %u", offset);
1308 /* Get register by instruction encoding */
1309 static u32 getreg_num(unsigned regnum, u32 mask)
1311 /* 8 bit ops use regnums 4-7 for high parts of word */
1312 if (mask == 0xFF && (regnum & 0x4))
1313 return getreg_num(regnum & 0x3, 0xFFFF) >> 8;
1316 case 0: return getreg(eax) & mask;
1317 case 1: return getreg(ecx) & mask;
1318 case 2: return getreg(edx) & mask;
1319 case 3: return getreg(ebx) & mask;
1320 case 4: return getreg(esp) & mask;
1321 case 5: return getreg(ebp) & mask;
1322 case 6: return getreg(esi) & mask;
1323 case 7: return getreg(edi) & mask;
1328 /* Set register by instruction encoding */
1329 static void setreg_num(unsigned regnum, u32 val, u32 mask)
1331 /* Don't try to set bits out of range */
1332 assert(~(val & ~mask));
1334 /* 8 bit ops use regnums 4-7 for high parts of word */
1335 if (mask == 0xFF && (regnum & 0x4)) {
1336 /* Construct the 16 bits we want. */
1337 val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
1338 setreg_num(regnum & 0x3, val, 0xFFFF);
1343 case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
1344 case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
1345 case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
1346 case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
1347 case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
1348 case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
1349 case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
1350 case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
1355 /* Get bytes of displacement appended to instruction, from r/m encoding */
1356 static u32 insn_displacement_len(u8 mod_reg_rm)
1358 /* Switch on the mod bits */
1359 switch (mod_reg_rm >> 6) {
1361 /* If mod == 0, and r/m == 101, 16-bit displacement follows */
1362 if ((mod_reg_rm & 0x7) == 0x5)
1364 /* Normally, mod == 0 means no literal displacement */
1367 /* One byte displacement */
1370 /* Four byte displacement */
/*
 * Emulate the handful of Guest kernel I/O instructions (IN/OUT variants)
 * that trap to us; anything we can't decode gets a GP fault (trap 13)
 * reinjected into the Guest.
 * NOTE(review): extraction dropped some interior lines (braces, the mask
 * computation, the %eax read-modify-write) — code left byte-identical.
 */
1379 static void emulate_insn(const u8 insn[])
1381 unsigned long args[] = { LHREQ_TRAP, 13 };
1382 unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
1383 unsigned int eax, port, mask;
1385 * Default is to return all-ones on IO port reads, which traditionally
1386 * means "there's nothing there".
1388 u32 val = 0xFFFFFFFF;
1391 * This must be the Guest kernel trying to do something, not userspace!
1392 * The bottom two bits of the CS segment register are the privilege
1395 if ((getreg(xcs) & 3) != 0x1)
1398 /* Decoding x86 instructions is icky. */
1401 * Around 2.6.33, the kernel started using an emulation for the
1402 * cmpxchg8b instruction in early boot on many configurations. This
1403 * code isn't paravirtualized, and it tries to disable interrupts.
1404 * Ignore it, which will Mostly Work.
1406 if (insn[insnlen] == 0xfa) {
1407 /* "cli", or Clear Interrupt Enable instruction. Skip it. */
1413 * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out.
1415 if (insn[insnlen] == 0x66) {
1417 /* The instruction is 1 byte so far, read the next byte. */
1421 /* If the lower bit isn't set, it's a single byte access */
1422 byte_access = !(insn[insnlen] & 1);
1425 * Now we can ignore the lower bit and decode the 4 opcodes
1426 * we need to emulate.
1428 switch (insn[insnlen] & 0xFE) {
1429 case 0xE4: /* in <next byte>,%al */
1430 port = insn[insnlen+1];
1434 case 0xEC: /* in (%dx),%al */
1435 port = getreg(edx) & 0xFFFF;
1439 case 0xE6: /* out %al,<next byte> */
1440 port = insn[insnlen+1];
1443 case 0xEE: /* out %al,(%dx) */
1444 port = getreg(edx) & 0xFFFF;
1448 /* OK, we don't know what this is, can't emulate. */
1452 /* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
1455 else if (small_operand)
1461 * If it was an "IN" instruction, they expect the result to be read
1462 * into %eax, so we change %eax.
1467 /* This is the PS/2 keyboard status; 1 means ready for output */
1470 else if (is_pci_addr_port(port))
1471 pci_addr_ioread(port, mask, &val);
1472 else if (is_pci_data_port(port))
1473 pci_data_ioread(port, mask, &val);
1475 /* Clear the bits we're about to read */
1477 /* Copy bits in from val. */
1479 /* Now update the register. */
1482 if (is_pci_addr_port(port)) {
1483 if (!pci_addr_iowrite(port, mask, eax))
1485 } else if (is_pci_data_port(port)) {
1486 if (!pci_data_iowrite(port, mask, eax))
1489 /* There are many other ports, eg. CMOS clock, serial
1490 * and parallel ports, so we ignore them all. */
1493 verbose("IO %s of %x to %u: %#08x\n",
1494 in ? "IN" : "OUT", mask, port, eax);
1496 /* Finally, we've "done" the instruction, so move past it. */
1497 setreg(eip, getreg(eip) + insnlen);
1501 warnx("Attempt to %s port %u (%#x mask)",
1502 in ? "read from" : "write to", port, mask);
1505 /* Inject trap into Guest. */
1506 if (write(lguest_fd, args, sizeof(args)) < 0)
1507 err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
1510 static struct device *find_mmio_region(unsigned long paddr, u32 *off)
1514 for (i = 1; i < MAX_PCI_DEVICES; i++) {
1515 struct device *d = devices.pci[i];
1519 if (paddr < d->mmio_addr)
1521 if (paddr >= d->mmio_addr + d->mmio_size)
1523 *off = paddr - d->mmio_addr;
1529 /* FIXME: Use vq array. */
1530 static struct virtqueue *vq_by_num(struct device *d, u32 num)
1532 struct virtqueue *vq = d->vq;
1540 static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
1541 struct virtqueue *vq)
1543 vq->pci_config = *cfg;
1546 static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
1547 struct virtqueue *vq)
1549 /* Only restore the per-vq part */
1550 size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);
1552 memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
1553 sizeof(*cfg) - off);
1557 * When they enable the virtqueue, we check that their setup is valid.
1559 static void enable_virtqueue(struct device *d, struct virtqueue *vq)
1562 * Create stack for thread. Since the stack grows upwards, we point
1563 * the stack pointer to the end of this region.
1565 char *stack = malloc(32768);
1567 /* Because lguest is 32 bit, all the descriptor high bits must be 0 */
1568 if (vq->pci_config.queue_desc_hi
1569 || vq->pci_config.queue_avail_hi
1570 || vq->pci_config.queue_used_hi)
1571 errx(1, "%s: invalid 64-bit queue address", d->name);
1573 /* Initialize the virtqueue and check they're all in range. */
1574 vq->vring.num = vq->pci_config.queue_size;
1575 vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
1576 sizeof(*vq->vring.desc) * vq->vring.num);
1577 vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
1578 sizeof(*vq->vring.avail)
1579 + (sizeof(vq->vring.avail->ring[0])
1581 vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
1582 sizeof(*vq->vring.used)
1583 + (sizeof(vq->vring.used->ring[0])
1587 /* Create a zero-initialized eventfd. */
1588 vq->eventfd = eventfd(0, 0);
1589 if (vq->eventfd < 0)
1590 err(1, "Creating eventfd");
1593 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
1594 * we get a signal if it dies.
1596 vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
1597 if (vq->thread == (pid_t)-1)
1598 err(1, "Creating clone");
/*
 * Handle a Guest write into a device's virtio-PCI MMIO region: feature
 * negotiation, queue selection/setup, notifications, and writes to the
 * device-specific config tail.  Bad accesses kill the Launcher (errx).
 * NOTE(review): extraction dropped interior lines (switch header, some
 * bodies/labels) — code left byte-identical.
 */
1601 static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
1603 struct virtqueue *vq;
1606 case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
1608 d->mmio->cfg.device_feature = d->features;
1610 d->mmio->cfg.device_feature = (d->features >> 32);
1612 d->mmio->cfg.device_feature = 0;
1613 goto write_through32;
1614 case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
1616 errx(1, "%s: Unexpected driver select %u",
1618 goto write_through32;
1619 case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
1620 if (d->mmio->cfg.guest_feature_select == 0) {
1621 d->features_accepted &= ~((u64)0xFFFFFFFF);
1622 d->features_accepted |= val;
1624 assert(d->mmio->cfg.guest_feature_select == 1);
1625 d->features_accepted &= ((u64)0xFFFFFFFF << 32);
1626 d->features_accepted |= ((u64)val) << 32;
1628 if (d->features_accepted & ~d->features)
1629 errx(1, "%s: over-accepted features %#llx of %#llx",
1630 d->name, d->features_accepted, d->features);
1631 goto write_through32;
1632 case offsetof(struct virtio_pci_mmio, cfg.device_status):
1633 verbose("%s: device status -> %#x\n", d->name, val);
1636 goto write_through8;
1637 case offsetof(struct virtio_pci_mmio, cfg.queue_select):
1638 vq = vq_by_num(d, val);
1639 /* Out of range? Return size 0 */
1641 d->mmio->cfg.queue_size = 0;
1642 goto write_through16;
1644 /* Save registers for old vq, if it was a valid vq */
1645 if (d->mmio->cfg.queue_size)
1646 save_vq_config(&d->mmio->cfg,
1647 vq_by_num(d, d->mmio->cfg.queue_select));
1648 /* Restore the registers for the queue they asked for */
1649 restore_vq_config(&d->mmio->cfg, vq);
1650 goto write_through16;
1651 case offsetof(struct virtio_pci_mmio, cfg.queue_size):
1653 errx(1, "%s: invalid queue size %u\n", d->name, val);
1654 if (d->mmio->cfg.queue_enable)
1655 errx(1, "%s: changing queue size on live device",
1657 goto write_through16;
1658 case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
1659 errx(1, "%s: attempt to set MSIX vector to %u",
1661 case offsetof(struct virtio_pci_mmio, cfg.queue_enable):
1663 errx(1, "%s: setting queue_enable to %u", d->name, val);
1664 d->mmio->cfg.queue_enable = val;
1665 save_vq_config(&d->mmio->cfg,
1666 vq_by_num(d, d->mmio->cfg.queue_select));
1667 enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select));
1668 goto write_through16;
1669 case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
1670 errx(1, "%s: attempt to write to queue_notify_off", d->name);
1671 case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
1672 case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
1673 case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
1674 case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
1675 case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
1676 case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
1677 if (d->mmio->cfg.queue_enable)
1678 errx(1, "%s: changing queue on live device",
1680 goto write_through32;
1681 case offsetof(struct virtio_pci_mmio, notify):
1682 vq = vq_by_num(d, val);
1684 errx(1, "Invalid vq notification on %u", val);
1685 /* Notify the process handling this vq by adding 1 to eventfd */
1686 write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
1687 goto write_through16;
1688 case offsetof(struct virtio_pci_mmio, isr):
1689 errx(1, "%s: Unexpected write to isr", d->name);
1691 errx(1, "%s: Unexpected write to offset %u", d->name, off);
1695 if (mask != 0xFFFFFFFF) {
1696 errx(1, "%s: non-32-bit write to offset %u (%#x)",
1697 d->name, off, getreg(eip));
1700 memcpy((char *)d->mmio + off, &val, 4);
1705 errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
1706 d->name, mask, off, getreg(eip));
1707 memcpy((char *)d->mmio + off, &val, 2);
1712 errx(1, "%s: non-8-bit write to offset %u (%#x)",
1713 d->name, off, getreg(eip));
1714 memcpy((char *)d->mmio + off, &val, 1);
/*
 * Handle a Guest read from a device's virtio-PCI MMIO region, enforcing
 * the access width each register allows; reads past the common layout
 * fall through to the device-specific config area.
 * NOTE(review): extraction dropped interior lines (switch header, labels,
 * the isr-reset assignment, the return) — code left byte-identical.
 */
1718 static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
1724 case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
1725 case offsetof(struct virtio_pci_mmio, cfg.device_feature):
1726 case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
1727 case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
1728 goto read_through32;
1729 case offsetof(struct virtio_pci_mmio, cfg.msix_config):
1730 errx(1, "%s: read of msix_config", d->name);
1731 case offsetof(struct virtio_pci_mmio, cfg.num_queues):
1732 goto read_through16;
1733 case offsetof(struct virtio_pci_mmio, cfg.device_status):
1734 case offsetof(struct virtio_pci_mmio, cfg.config_generation):
1736 case offsetof(struct virtio_pci_mmio, notify):
1737 goto read_through16;
1738 case offsetof(struct virtio_pci_mmio, isr):
1740 errx(1, "%s: non-8-bit read from offset %u (%#x)",
1741 d->name, off, getreg(eip));
1742 /* Read resets the isr */
1746 case offsetof(struct virtio_pci_mmio, padding):
1747 errx(1, "%s: read from padding (%#x)",
1748 d->name, getreg(eip));
1750 /* Read from device config space, beware unaligned overflow */
1751 if (off > d->mmio_size - 4)
1752 errx(1, "%s: read past end (%#x)",
1753 d->name, getreg(eip));
1754 if (mask == 0xFFFFFFFF)
1755 goto read_through32;
1756 else if (mask == 0xFFFF)
1757 goto read_through16;
1763 if (mask != 0xFFFFFFFF)
1764 errx(1, "%s: non-32-bit read to offset %u (%#x)",
1765 d->name, off, getreg(eip));
1766 memcpy(&val, (char *)d->mmio + off, 4);
1771 errx(1, "%s: non-16-bit read to offset %u (%#x)",
1772 d->name, off, getreg(eip));
1773 memcpy(&val, (char *)d->mmio + off, 2);
1778 errx(1, "%s: non-8-bit read to offset %u (%#x)",
1779 d->name, off, getreg(eip));
1780 memcpy(&val, (char *)d->mmio + off, 1);
/*
 * Decode the MOV instruction (opcodes 0x88/0x89/0x8a/0x8b, optional 0x66
 * prefix) that faulted on an MMIO address and dispatch to
 * emulate_mmio_read/write; unknown accesses reinject a page fault (trap 14).
 * NOTE(review): extraction dropped some interior lines (mask adjustment for
 * the 0x66 and byte cases, the not-a-device early return) — code left
 * byte-identical.
 */
1784 static void emulate_mmio(unsigned long paddr, const u8 *insn)
1786 u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
1787 struct device *d = find_mmio_region(paddr, &off);
1788 unsigned long args[] = { LHREQ_TRAP, 14 };
1791 warnx("MMIO touching %#08lx (not a device)", paddr);
1795 /* Prefix makes it a 16 bit op */
1796 if (insn[0] == 0x66) {
1802 if (insn[insnlen] == 0x89) {
1803 /* Next byte is r/m byte: bits 3-5 are register. */
1804 val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
1805 emulate_mmio_write(d, off, val, mask);
1806 insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
1807 } else if (insn[insnlen] == 0x8b) { /* ioread */
1808 /* Next byte is r/m byte: bits 3-5 are register. */
1809 val = emulate_mmio_read(d, off, mask);
1810 setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
1811 insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
1812 } else if (insn[0] == 0x88) { /* 8-bit iowrite */
1814 /* Next byte is r/m byte: bits 3-5 are register. */
1815 val = getreg_num((insn[1] >> 3) & 0x7, mask);
1816 emulate_mmio_write(d, off, val, mask);
1817 insnlen = 2 + insn_displacement_len(insn[1]);
1818 } else if (insn[0] == 0x8a) { /* 8-bit ioread */
1820 val = emulate_mmio_read(d, off, mask);
1821 setreg_num((insn[1] >> 3) & 0x7, val, mask);
1822 insnlen = 2 + insn_displacement_len(insn[1]);
1824 warnx("Unknown MMIO instruction touching %#08lx:"
1825 " %02x %02x %02x %02x at %u",
1826 paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
1828 /* Inject trap into Guest. */
1829 if (write(lguest_fd, args, sizeof(args)) < 0)
1830 err(1, "Reinjecting trap 14 for fault at %#x",
1835 /* Finally, we've "done" the instruction, so move past it. */
1836 setreg(eip, getreg(eip) + insnlen);
1842 * All devices need a descriptor so the Guest knows it exists, and a "struct
1843 * device" so the Launcher can keep track of it. We have common helper
1844 * routines to allocate and manage them.
1846 static void add_pci_virtqueue(struct device *dev,
1847 void (*service)(struct virtqueue *))
1849 struct virtqueue **i, *vq = malloc(sizeof(*vq));
1851 /* Initialize the virtqueue */
1853 vq->last_avail_idx = 0;
1857 * This is the routine the service thread will run, and its Process ID
1858 * once it's running.
1860 vq->service = service;
1861 vq->thread = (pid_t)-1;
1863 /* Initialize the configuration. */
1864 vq->pci_config.queue_size = VIRTQUEUE_NUM;
1865 vq->pci_config.queue_enable = 0;
1866 vq->pci_config.queue_notify_off = 0;
1868 /* Add one to the number of queues */
1869 vq->dev->mmio->cfg.num_queues++;
1872 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
1875 for (i = &dev->vq; *i; i = &(*i)->next);
1879 /* The Guest accesses the feature bits via the PCI common config MMIO region */
1880 static void add_pci_feature(struct device *dev, unsigned bit)
1882 dev->features |= (1ULL << bit);
1885 /* For devices with no config. */
1886 static void no_device_config(struct device *dev)
1888 dev->mmio_addr = get_mmio_region(dev->mmio_size);
1890 dev->config.bar[0] = dev->mmio_addr;
1891 /* Bottom 4 bits must be zero */
1892 assert(~(dev->config.bar[0] & 0xF));
1895 /* This puts the device config into BAR0 */
1896 static void set_device_config(struct device *dev, const void *conf, size_t len)
1899 dev->mmio_size += len;
1900 dev->mmio = realloc(dev->mmio, dev->mmio_size);
1901 memcpy(dev->mmio + 1, conf, len);
1903 /* Hook up device cfg */
1904 dev->config.cfg_access.cap.cap_next
1905 = offsetof(struct pci_config, device);
1907 /* Fix up device cfg field length. */
1908 dev->config.device.length = len;
1910 /* The rest is the same as the no-config case */
1911 no_device_config(dev);
1914 static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
1915 size_t bar_offset, size_t bar_bytes, u8 next)
1917 cap->cap_vndr = PCI_CAP_ID_VNDR;
1918 cap->cap_next = next;
1919 cap->cap_len = caplen;
1920 cap->cfg_type = type;
1922 memset(cap->padding, 0, sizeof(cap->padding));
1923 cap->offset = bar_offset;
1924 cap->length = bar_bytes;
1928 * This sets up the pci_config structure, as defined in the virtio 1.0
1929 * standard (and PCI standard).
/*
 * Fill in the PCI config space for a virtio device: IDs, class codes, IRQ
 * line, and the chain of vendor capabilities (common cfg -> notify -> isr
 * -> cfg_access [-> device cfg, sewn in later by set_device_config()]).
 * Section numbers in comments refer to the virtio 1.0 specification.
 * NOTE(review): extraction dropped some interior lines (class/revision
 * assignments, trailing init_cap arguments, closing brace) — code left
 * byte-identical.
 */
1931 static void init_pci_config(struct pci_config *pci, u16 type,
1932 u8 class, u8 subclass)
1934 size_t bar_offset, bar_len;
1936 /* Save typing: most thing are happy being zero. */
1937 memset(pci, 0, sizeof(*pci));
1939 /* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
1940 pci->vendor_id = 0x1AF4;
1941 /* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
1942 pci->device_id = 0x1040 + type;
1945 * PCI have specific codes for different types of devices.
1946 * Linux doesn't care, but it's a good clue for people looking
1950 pci->subclass = subclass;
1953 * 4.1.2.1 Non-transitional devices SHOULD have a PCI Revision
1959 * 4.1.2.1 Non-transitional devices SHOULD have a PCI
1960 * Subsystem Device ID of 0x40 or higher.
1962 pci->subsystem_device_id = 0x40;
1964 /* We use our dummy interrupt controller, and irq_line is the irq */
1965 pci->irq_line = devices.next_irq++;
1968 /* Support for extended capabilities. */
1969 pci->status = (1 << 4);
1972 pci->capabilities = offsetof(struct pci_config, common);
1974 bar_offset = offsetof(struct virtio_pci_mmio, cfg);
1975 bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
1976 init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
1977 bar_offset, bar_len,
1978 offsetof(struct pci_config, notify));
1980 bar_offset += bar_len;
1981 bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);
1982 /* FIXME: Use a non-zero notify_off, for per-queue notification? */
1983 init_cap(&pci->notify.cap, sizeof(pci->notify),
1984 VIRTIO_PCI_CAP_NOTIFY_CFG,
1985 bar_offset, bar_len,
1986 offsetof(struct pci_config, isr));
1988 bar_offset += bar_len;
1989 bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
1990 init_cap(&pci->isr, sizeof(pci->isr),
1991 VIRTIO_PCI_CAP_ISR_CFG,
1992 bar_offset, bar_len,
1993 offsetof(struct pci_config, cfg_access));
1995 /* This doesn't have any presence in the BAR */
1996 init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
1997 VIRTIO_PCI_CAP_PCI_CFG,
2000 bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
2001 assert(bar_offset == sizeof(struct virtio_pci_mmio));
2004 * This gets sewn in and length set in set_device_config().
2005 * Some devices don't have a device configuration interface, so
2006 * we never expose this if we don't call set_device_config().
2008 init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
2013 * This routine does all the creation and setup of a new device, but we don't
2014 * actually place the MMIO region until we know the size (if any) of the
2015 * device-specific config. And we don't actually start the service threads
2018 * See what I mean about userspace being boring?
/*
 * Allocate a new virtio-PCI device: set up its MMIO region (common layout
 * only at this point), feature bits, PCI config space, and register it in
 * the global devices.pci[] array (slot 0 stays empty).
 * NOTE(review): extraction dropped some field-init lines; also malloc/calloc
 * results are unchecked here — confirm against full source before relying
 * on this in review.
 */
2020 static struct device *new_pci_device(const char *name, u16 type,
2021 u8 class, u8 subclass)
2023 struct device *dev = malloc(sizeof(*dev));
2025 /* Now we populate the fields one at a time. */
2028 dev->running = false;
2029 dev->mmio_size = sizeof(struct virtio_pci_mmio);
2030 dev->mmio = calloc(1, dev->mmio_size);
2031 dev->features = (u64)1 << VIRTIO_F_VERSION_1;
2032 dev->features_accepted = 0;
2034 if (devices.device_num + 1 >= MAX_PCI_DEVICES)
2035 errx(1, "Can only handle 31 PCI devices");
2037 init_pci_config(&dev->config, type, class, subclass);
2038 assert(!devices.pci[devices.device_num+1]);
2039 devices.pci[++devices.device_num] = dev;
2045 * Our first setup routine is the console. It's a fairly simple device, but
2046 * UNIX tty handling makes it uglier than it could be.
/*
 * Set up the virtio console device: put the terminal into raw mode
 * (saving the original settings for restoration on exit), then create
 * the device with an input and an output virtqueue and no config space.
 */
2048 static void setup_console(void)
2052 /* If we can save the initial standard input settings... */
2053 if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
2054 struct termios term = orig_term;
2056 * Then we turn off echo, line buffering and ^C etc: We want a
2057 * raw input stream to the Guest.
2059 term.c_lflag &= ~(ISIG|ICANON|ECHO);
2060 tcsetattr(STDIN_FILENO, TCSANOW, &term);
2063 dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);
2065 /* We store the console state in dev->priv, and initialize it. */
2066 dev->priv = malloc(sizeof(struct console_abort));
2067 ((struct console_abort *)dev->priv)->count = 0;
2070 * The console needs two virtqueues: the input then the output. When
2071 * they put something the input queue, we make sure we're listening to
2072 * stdin. When they put something in the output queue, we write it to
2075 add_pci_virtqueue(dev, console_input);
2076 add_pci_virtqueue(dev, console_output);
2078 /* There's no configuration area for this device. */
2079 no_device_config(dev);
2081 verbose("device %u: console\n", devices.device_num);
2086 * Inter-guest networking is an interesting area. Simplest is to have a
2087 * --sharenet=<name> option which opens or creates a named pipe. This can be
2088 * used to send packets to another guest in a 1:1 manner.
2090 * More sophisticated is to use one of the tools developed for project like UML
2093 * Faster is to do virtio bonding in kernel. Doing this 1:1 would be
2094 * completely generic ("here's my vring, attach to your vring") and would work
2095 * for any traffic. Of course, namespace and permissions issues need to be
2096 * dealt with. A more sophisticated "multi-channel" virtio_net.c could hide
2097 * multiple inter-guest channels behind one interface, although it would
2098 * require some manner of hotplugging new virtio channels.
2100 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
2103 static u32 str2ip(const char *ipaddr)
2107 if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
2108 errx(1, "Failed to parse IP address '%s'", ipaddr);
2109 return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
/* Parse a colon-separated MAC address string into 6 bytes. */
static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int i, m[6];

	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	for (i = 0; i < 6; i++)
		mac[i] = m[i];
}
2127 * This code is "adapted" from libbridge: it attaches the Host end of the
2128 * network device to the bridge device specified by the command line.
2130 * This is yet another James Morris contribution (I'm an IP-level guy, so I
2131 * dislike bridging), and I just try not to break it.
2133 static void add_to_bridge(int fd, const char *if_name, const char *br_name)
2139 errx(1, "must specify bridge name");
2141 ifidx = if_nametoindex(if_name);
2143 errx(1, "interface %s does not exist!", if_name);
2145 strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
2146 ifr.ifr_name[IFNAMSIZ-1] = '\0';
2147 ifr.ifr_ifindex = ifidx;
2148 if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
2149 err(1, "can't add %s to bridge %s", if_name, br_name);
2153 * This sets up the Host end of the network device with an IP address, brings
2154 * it up so packets will flow, the copies the MAC address into the hwaddr
2157 static void configure_device(int fd, const char *tapif, u32 ipaddr)
2160 struct sockaddr_in sin;
2162 memset(&ifr, 0, sizeof(ifr));
2163 strcpy(ifr.ifr_name, tapif);
2165 /* Don't read these incantations. Just cut & paste them like I did! */
2166 sin.sin_family = AF_INET;
2167 sin.sin_addr.s_addr = htonl(ipaddr);
2168 memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
2169 if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
2170 err(1, "Setting %s interface address", tapif);
2171 ifr.ifr_flags = IFF_UP;
2172 if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
2173 err(1, "Bringing interface %s up", tapif);
2176 static int get_tun_device(char tapif[IFNAMSIZ])
2182 /* Start with this zeroed. Messy but sure. */
2183 memset(&ifr, 0, sizeof(ifr));
2186 * We open the /dev/net/tun device and tell it we want a tap device. A
2187 * tap device is like a tun device, only somehow different. To tell
2188 * the truth, I completely blundered my way through this code, but it
2191 netfd = open_or_die("/dev/net/tun", O_RDWR);
2192 ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
2193 strcpy(ifr.ifr_name, "tap%d");
2194 if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
2195 err(1, "configuring /dev/net/tun");
2197 if (ioctl(netfd, TUNSETOFFLOAD,
2198 TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
2199 err(1, "Could not set features for tun device");
2202 * We don't need checksums calculated for packets coming in this
2205 ioctl(netfd, TUNSETNOCSUM, 1);
2208 * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
2209 * field at the end of the network header iff
2210 * VIRTIO_NET_F_MRG_RXBUF was negotiated. For virtio 1.0,
2211 * that became the norm, but we need to tell the tun device
2212 * about our expanded header (which is called
2213 * virtio_net_hdr_mrg_rxbuf in the legacy system).
2215 vnet_hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2216 if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0)
2217 err(1, "Setting tun header size to %u", vnet_hdr_sz);
2219 memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
2224 * Our network is a Host<->Guest network. This can either use bridging or
2225 * routing, but the principle is the same: it uses the "tun" device to inject
2226 * packets into the Host as if they came in from a normal network card. We
2227 * just shunt packets between the Guest and the tun device.
/*
 * Create the virtio network device backed by a tap interface: parse the
 * --tunnet argument (either "bridge:<name>[:<mac>]" or "<ip>[:<mac>]"),
 * configure the Host side, and advertise checksum/TSO features.
 * NOTE(review): extraction dropped some interior lines (decls, the
 * bridging branch, socket close) — code left byte-identical.  `conf` is
 * only partially filled before set_device_config() — confirm against full
 * source whether it is zeroed elsewhere.
 */
2229 static void setup_tun_net(char *arg)
2232 struct net_info *net_info = malloc(sizeof(*net_info));
2234 u32 ip = INADDR_ANY;
2235 bool bridging = false;
2236 char tapif[IFNAMSIZ], *p;
2237 struct virtio_net_config conf;
2239 net_info->tunfd = get_tun_device(tapif);
2241 /* First we create a new network device. */
2242 dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00);
2243 dev->priv = net_info;
2245 /* Network devices need a recv and a send queue, just like console. */
2246 add_pci_virtqueue(dev, net_input);
2247 add_pci_virtqueue(dev, net_output);
2250 * We need a socket to perform the magic network ioctls to bring up the
2251 * tap interface, connect to the bridge etc. Any socket will do!
2253 ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
2255 err(1, "opening IP socket");
2257 /* If the command line was --tunnet=bridge:<name> do bridging. */
2258 if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
2259 arg += strlen(BRIDGE_PFX);
2263 /* A mac address may follow the bridge name or IP address */
2264 p = strchr(arg, ':');
2266 str2mac(p+1, conf.mac);
2267 add_pci_feature(dev, VIRTIO_NET_F_MAC);
2271 /* arg is now either an IP address or a bridge name */
2273 add_to_bridge(ipfd, tapif, arg);
2277 /* Set up the tun device. */
2278 configure_device(ipfd, tapif, ip);
2280 /* Expect Guest to handle everything except UFO */
2281 add_pci_feature(dev, VIRTIO_NET_F_CSUM);
2282 add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
2283 add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
2284 add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
2285 add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN);
2286 add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4);
2287 add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6);
2288 add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN);
2289 /* We handle indirect ring entries */
2290 add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
2291 set_device_config(dev, &conf, sizeof(conf));
2293 /* We don't need the socket any more; setup is done. */
2297 verbose("device %u: tun %s attached to bridge: %s\n",
2298 devices.device_num, tapif, arg);
2300 verbose("device %u: tun %s: %s\n",
2301 devices.device_num, tapif, arg);
2305 /* This hangs off device->priv. */
2307 /* The size of the file. */
2310 /* The file descriptor for the file. */
2318 * The disk only has one virtqueue, so it only has one thread. It is really
2319 * simple: the Guest asks for a block number and we read or write that position
2322 * Before we serviced each virtqueue in a separate thread, that was unacceptably
2323 * slow: the Guest waits until the read is finished before running anything
2324 * else, even if it could have been doing useful work.
2326 * We could have used async I/O, except it's reputed to suck so hard that
2327 * characters actually go missing from your code when you try to use it.
/*
 * Service one virtio-block request: read the outhdr from the descriptor
 * chain, perform the write/flush/read against the backing file, store the
 * status byte in the last input byte, and hand the buffer back to the
 * Guest via add_used().
 * NOTE(review): extraction dropped interior lines (decls of i/in/off/ret,
 * wlen assignments, some braces) — code left byte-identical.
 */
2329 static void blk_request(struct virtqueue *vq)
2331 struct vblk_info *vblk = vq->dev->priv;
2332 unsigned int head, out_num, in_num, wlen;
2335 struct virtio_blk_outhdr out;
2336 struct iovec iov[vq->vring.num];
2340 * Get the next request, where we normally wait. It triggers the
2341 * interrupt to acknowledge previously serviced requests (if any).
2343 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
2345 /* Copy the output header from the front of the iov (adjusts iov) */
2346 iov_consume(iov, out_num, &out, sizeof(out));
2348 /* Find and trim end of iov input array, for our status byte. */
2350 for (i = out_num + in_num - 1; i >= out_num; i--) {
2351 if (iov[i].iov_len > 0) {
2352 in = iov[i].iov_base + iov[i].iov_len - 1;
2358 errx(1, "Bad virtblk cmd with no room for status");
2361 * For historical reasons, block operations are expressed in 512 byte
2364 off = out.sector * 512;
2366 if (out.type & VIRTIO_BLK_T_OUT) {
2370 * Move to the right location in the block file. This can fail
2371 * if they try to write past end.
2373 if (lseek64(vblk->fd, off, SEEK_SET) != off)
2374 err(1, "Bad seek to sector %llu", out.sector);
2376 ret = writev(vblk->fd, iov, out_num);
2377 verbose("WRITE to sector %llu: %i\n", out.sector, ret);
2380 * Grr... Now we know how long the descriptor they sent was, we
2381 * make sure they didn't try to write over the end of the block
2382 * file (possibly extending it).
2384 if (ret > 0 && off + ret > vblk->len) {
2385 /* Trim it back to the correct length */
2386 ftruncate64(vblk->fd, vblk->len);
2387 /* Die, bad Guest, die. */
2388 errx(1, "Write past end %llu+%u", off, ret);
2392 *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
2393 } else if (out.type & VIRTIO_BLK_T_FLUSH) {
2395 ret = fdatasync(vblk->fd);
2396 verbose("FLUSH fdatasync: %i\n", ret);
2398 *in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
2403 * Move to the right location in the block file. This can fail
2404 * if they try to read past end.
2406 if (lseek64(vblk->fd, off, SEEK_SET) != off)
2407 err(1, "Bad seek to sector %llu", out.sector);
2409 ret = readv(vblk->fd, iov + out_num, in_num);
2411 wlen = sizeof(*in) + ret;
2412 *in = VIRTIO_BLK_S_OK;
2415 *in = VIRTIO_BLK_S_IOERR;
2419 /* Finished that request. */
2420 add_used(vq, head, wlen);
2423 /*L:198 This actually sets up a virtual block device. */
2424 static void setup_block_file(const char *filename)
2427 struct vblk_info *vblk;
2428 struct virtio_blk_config conf;
2430 /* Create the device. */
2431 dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80);
2433 /* The device has one virtqueue, where the Guest places requests. */
2434 add_pci_virtqueue(dev, blk_request);
2436 /* Allocate the room for our own bookkeeping */
2437 vblk = dev->priv = malloc(sizeof(*vblk));
2439 /* First we open the file and store the length. */
2440 vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
2441 vblk->len = lseek64(vblk->fd, 0, SEEK_END);
2443 /* Tell Guest how many sectors this device has. */
2444 conf.capacity = cpu_to_le64(vblk->len / 512);
2447 * Tell Guest not to put in too many descriptors at once: two are used
2448 * for the in and out elements.
2450 add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX);
2451 conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);
2453 set_device_config(dev, &conf, sizeof(struct virtio_blk_config));
2455 verbose("device %u: virtblock %llu sectors\n",
2456 devices.device_num, le64_to_cpu(conf.capacity));
2460 * Our random number generator device reads from /dev/urandom into the Guest's
2461 * input buffers. The usual case is that the Guest doesn't want random numbers
2462 * and so has no buffers although /dev/urandom is still readable, whereas
2463 * console is the reverse.
2465 * The same logic applies, however.
2471 static void rng_input(struct virtqueue *vq)
2474 unsigned int head, in_num, out_num, totlen = 0;
2475 struct rng_info *rng_info = vq->dev->priv;
2476 struct iovec iov[vq->vring.num];
2478 /* First we need a buffer from the Guests's virtqueue. */
2479 head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
2481 errx(1, "Output buffers in rng?");
2484 * Just like the console write, we loop to cover the whole iovec.
2485 * In this case, short reads actually happen quite a bit.
2487 while (!iov_empty(iov, in_num)) {
2488 len = readv(rng_info->rfd, iov, in_num);
2490 err(1, "Read from /dev/urandom gave %i", len);
2491 iov_consume(iov, in_num, NULL, len);
2495 /* Tell the Guest about the new input. */
2496 add_used(vq, head, totlen);
2500 * This creates a "hardware" random number device for the Guest.
2502 static void setup_rng(void)
2505 struct rng_info *rng_info = malloc(sizeof(*rng_info));
2507 /* Our device's private info simply contains the /dev/urandom fd. */
2508 rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);
2510 /* Create the new device. */
2511 dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
2512 dev->priv = rng_info;
2514 /* The device has one virtqueue, where the Guest places inbufs. */
2515 add_pci_virtqueue(dev, rng_input);
2517 /* We don't have any configuration space */
2518 no_device_config(dev);
2520 verbose("device %u: rng\n", devices.device_num);
2522 /* That's the end of device setup. */
2524 /*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
2525 static void __attribute__((noreturn)) restart_guest(void)
2530 * Since we don't track all open fds, we simply close everything beyond
2533 for (i = 3; i < FD_SETSIZE; i++)
2536 /* Reset all the devices (kills all threads). */
2539 execv(main_args[0], main_args);
2540 err(1, "Could not exec %s", main_args[0]);
2544 * Finally we reach the core of the Launcher which runs the Guest, serves
2545 * its input and output, and finally, lays it to rest.
2547 static void __attribute__((noreturn)) run_guest(void)
2550 struct lguest_pending notify;
2553 /* We read from the /dev/lguest device to run the Guest. */
2554 readval = pread(lguest_fd, ¬ify, sizeof(notify), cpu_id);
2556 /* One unsigned long means the Guest did HCALL_NOTIFY */
2557 if (readval == sizeof(notify)) {
2558 if (notify.trap == 0x1F) {
2559 verbose("Notify on address %#08x\n",
2561 handle_output(notify.addr);
2562 } else if (notify.trap == 13) {
2563 verbose("Emulating instruction at %#x\n",
2565 emulate_insn(notify.insn);
2566 } else if (notify.trap == 14) {
2567 verbose("Emulating MMIO at %#x\n",
2569 emulate_mmio(notify.addr, notify.insn);
2571 errx(1, "Unknown trap %i addr %#08x\n",
2572 notify.trap, notify.addr);
2573 /* ENOENT means the Guest died. Reading tells us why. */
2574 } else if (errno == ENOENT) {
2575 char reason[1024] = { 0 };
2576 pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
2577 errx(1, "%s", reason);
2578 /* ERESTART means that we need to reboot the guest */
2579 } else if (errno == ERESTART) {
2581 /* Anything else means a bug or incompatible change. */
2583 err(1, "Running guest failed");
2587 * This is the end of the Launcher. The good news: we are over halfway
2588 * through! The bad news: the most fiendish part of the code still lies ahead
2591 * Are you ready? Take a deep breath and join me in the core of the Host, in
/*
 * Long options accepted by the Launcher.  getopt_long() requires the
 * array to end with a zeroed sentinel entry.
 */
static struct option opts[] = {
	{ "verbose", 0, NULL, 'v' },
	{ "tunnet", 1, NULL, 't' },
	{ "block", 1, NULL, 'b' },
	{ "rng", 0, NULL, 'r' },
	{ "initrd", 1, NULL, 'i' },
	{ "username", 1, NULL, 'u' },
	{ "chroot", 1, NULL, 'c' },
	{ NULL },
};
/* Print the full option syntax and exit with status 1. */
static void usage(void)
{
	/* Now mentions --rng, --username and --chroot, which were accepted
	 * by the option parser but missing from the message. */
	errx(1, "Usage: lguest [--verbose] "
	     "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n"
	     "|--block=<filename>|--rng|--initrd=<filename>\n"
	     "|--username=<name>|--chroot=<dir>]...\n"
	     "<mem-in-mb> vmlinux [args...]");
}
2613 /*L:105 The main routine is where the real work begins: */
2614 int main(int argc, char *argv[])
2616 /* Memory, code startpoint and size of the (optional) initrd. */
2617 unsigned long mem = 0, start, initrd_size = 0;
2618 /* Two temporaries. */
2620 /* The boot information for the Guest. */
2621 struct boot_params *boot;
2622 /* If they specify an initrd file to load. */
2623 const char *initrd_name = NULL;
2625 /* Password structure for initgroups/setres[gu]id */
2626 struct passwd *user_details = NULL;
2628 /* Directory to chroot to */
2629 char *chroot_path = NULL;
2631 /* Save the args: we "reboot" by execing ourselves again. */
2635 * First we initialize the device list. We remember next interrupt
2636 * number to use for devices (1: remember that 0 is used by the timer).
2638 devices.next_irq = 1;
2640 /* We're CPU 0. In fact, that's the only CPU possible right now. */
2644 * We need to know how much memory so we can set up the device
2645 * descriptor and memory pages for the devices as we parse the command
2646 * line. So we quickly look through the arguments to find the amount
2649 for (i = 1; i < argc; i++) {
2650 if (argv[i][0] != '-') {
2651 mem = atoi(argv[i]) * 1024 * 1024;
2653 * We start by mapping anonymous pages over all of
2654 * guest-physical memory range. This fills it with 0,
2655 * and ensures that the Guest won't be killed when it
2656 * tries to access it.
2658 guest_base = map_zeroed_pages(mem / getpagesize()
2661 guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
2666 /* The options are fairly straight-forward */
2667 while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
2673 setup_tun_net(optarg);
2676 setup_block_file(optarg);
2682 initrd_name = optarg;
2685 user_details = getpwnam(optarg);
2687 err(1, "getpwnam failed, incorrect username?");
2690 chroot_path = optarg;
2693 warnx("Unknown argument %s", argv[optind]);
2698 * After the other arguments we expect memory and kernel image name,
2699 * followed by command line arguments for the kernel.
2701 if (optind + 2 > argc)
2704 verbose("Guest base is at %p\n", guest_base);
2706 /* We always have a console device */
2709 /* Initialize the (fake) PCI host bridge device. */
2710 init_pci_host_bridge();
2712 /* Now we load the kernel */
2713 start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));
2715 /* Boot information is stashed at physical address 0 */
2716 boot = from_guest_phys(0);
2718 /* Map the initrd image if requested (at top of physical memory) */
2720 initrd_size = load_initrd(initrd_name, mem);
2722 * These are the location in the Linux boot header where the
2723 * start and size of the initrd are expected to be found.
2725 boot->hdr.ramdisk_image = mem - initrd_size;
2726 boot->hdr.ramdisk_size = initrd_size;
2727 /* The bootloader type 0xFF means "unknown"; that's OK. */
2728 boot->hdr.type_of_loader = 0xFF;
2732 * The Linux boot header contains an "E820" memory map: ours is a
2733 * simple, single region.
2735 boot->e820_entries = 1;
2736 boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM });
2738 * The boot header contains a command line pointer: we put the command
2739 * line after the boot header.
2741 boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1);
2742 /* We use a simple helper to copy the arguments separated by spaces. */
2743 concat((char *)(boot + 1), argv+optind+2);
2745 /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
2746 boot->hdr.kernel_alignment = 0x1000000;
2748 /* Boot protocol version: 2.07 supports the fields for lguest. */
2749 boot->hdr.version = 0x207;
2751 /* The hardware_subarch value of "1" tells the Guest it's an lguest. */
2752 boot->hdr.hardware_subarch = 1;
2754 /* Tell the entry path not to try to reload segment registers. */
2755 boot->hdr.loadflags |= KEEP_SEGMENTS;
2757 /* We tell the kernel to initialize the Guest. */
2760 /* Ensure that we terminate if a device-servicing child dies. */
2761 signal(SIGCHLD, kill_launcher);
2763 /* If we exit via err(), this kills all the threads, restores tty. */
2764 atexit(cleanup_devices);
2766 /* If requested, chroot to a directory */
2768 if (chroot(chroot_path) != 0)
2769 err(1, "chroot(\"%s\") failed", chroot_path);
2771 if (chdir("/") != 0)
2772 err(1, "chdir(\"/\") failed");
2774 verbose("chroot done\n");
2777 /* If requested, drop privileges */
2782 u = user_details->pw_uid;
2783 g = user_details->pw_gid;
2785 if (initgroups(user_details->pw_name, g) != 0)
2786 err(1, "initgroups failed");
2788 if (setresgid(g, g, g) != 0)
2789 err(1, "setresgid failed");
2791 if (setresuid(u, u, u) != 0)
2792 err(1, "setresuid failed");
2794 verbose("Dropping privileges completed\n");
2797 /* Finally, run the Guest. This doesn't return. */
2803 * Mastery is done: you now know everything I do.
2805 * But surely you have seen code, features and bugs in your wanderings which
2806 * you now yearn to attack? That is the real game, and I look forward to you
2807 * patching and forking lguest into the Your-Name-Here-visor.
2809 * Farewell, and good coding!