2 * The pagetable code, on the other hand, still shows the scars of
3 * previous encounters. It's functional, and as neat as it can be in the
4 * circumstances, but be wary, for these things are subtle and break easily.
5 * The Guest provides a virtual to physical mapping, but we can neither trust
6 * it nor use it: we verify and convert it here then point the CPU to the
7 * converted Guest pages when running the Guest.
10 /* Copyright (C) Rusty Russell IBM Corporation 2006.
11 * GPL v2 and any later version */
13 #include <linux/types.h>
14 #include <linux/spinlock.h>
15 #include <linux/random.h>
16 #include <linux/percpu.h>
17 #include <asm/tlbflush.h>
18 #include <asm/uaccess.h>
19 #include <asm/bootparam.h>
23 * We hold a reference to pages, which prevents them from being swapped.
24 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
25 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
26 * could probably consider launching Guests as non-root.
32 * We use two-level page tables for the Guest. If you're not entirely
33 * comfortable with virtual addresses, physical addresses and page tables then
34 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with diagrams!).
37 * The Guest keeps page tables, but we maintain the actual ones here: these are
38 * called "shadow" page tables. Which is a very Guest-centric name: these are
39 * the real page tables the CPU uses, although we keep them up to date to
40 * reflect the Guest's. (See what I mean about weird naming? Since when do
41 * shadows reflect anything?)
43 * Anyway, this is the most complicated part of the Host code. There are seven parts to this:
45 * (i) Looking up a page table entry when the Guest faults,
46 * (ii) Making sure the Guest stack is mapped,
47 * (iii) Setting up a page table entry when the Guest tells us one has changed,
48 * (iv) Switching page tables,
49 * (v) Flushing (throwing away) page tables,
50 * (vi) Mapping the Switcher when the Guest is about to run,
51 * (vii) Setting up the page tables initially.
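/*
 * A quick worked refresher on the non-PAE two-level split (a sketch only;
 * this helper is illustrative and not something the kernel defines): a
 * 32-bit virtual address is 10 bits of PGD index, 10 bits of PTE index and
 * 12 bits of offset within the page.
 */
static inline void example_split_vaddr(unsigned long vaddr,
				       unsigned int *pgd_idx,
				       unsigned int *pte_idx,
				       unsigned int *off)
{
	/* Illustrative helper only, not part of the original file. */
	*pgd_idx = vaddr >> 22;			/* top 10 bits: one of 1024 PGD slots */
	*pte_idx = (vaddr >> 12) & 0x3FF;	/* next 10 bits: one of 1024 PTEs */
	*off	 = vaddr & 0xFFF;		/* low 12 bits: byte within the page */
}
/* For example, 0xC0123456 splits into PGD slot 768, PTE slot 0x123, offset 0x456. */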
55 * 1024 entries in a page table page map 1024 pages: 4MB. The Switcher is
56 * conveniently placed at the top 4MB, so it uses a separate, complete PTE page.
59 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
62 * For PAE we need the PMD index as well. We use the last 2MB, so we
63 * will need the last pmd entry of the last pmd page.
66 #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
67 #define RESERVE_MEM 2U
68 #define CHECK_GPGD_MASK _PAGE_PRESENT
70 #define RESERVE_MEM 4U
71 #define CHECK_GPGD_MASK _PAGE_TABLE
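/*
 * To make the arithmetic above concrete (a sketch using the usual 32-bit
 * x86 values, which are assumed here rather than taken from this file):
 * without PAE, PTRS_PER_PGD is 1024 and each PGD slot covers 4MB, so slot
 * 1023 covers 0xFFC00000..0xFFFFFFFF -- the top 4MB where the Switcher
 * lives.  With PAE, PTRS_PER_PMD is 512 and each PMD slot covers 2MB, so
 * the last PMD entry of the last PMD page covers just the top 2MB -- hence
 * the smaller RESERVE_MEM in that case.
 */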
75 * We actually need a separate PTE page for each CPU. Remember that after the
76 * Switcher code itself come two pages for each CPU, and we don't want this
77 * CPU's Guest to see the pages of any other CPU.
79 static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
80 #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
83 * The page table code is curly enough to need helper functions to keep it clear and clean.
86 * There are two functions which return pointers to the shadow (aka "real") page tables.
89 * spgd_addr() takes the virtual address and returns a pointer to the top-level
90 * page directory entry (PGD) for that address. Since we keep track of several
91 * page tables, the "i" argument tells us which one we're interested in (it's
92 * usually the current one).
94 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
96 unsigned int index = pgd_index(vaddr);
98 #ifndef CONFIG_X86_PAE
99 /* We kill any Guest trying to touch the Switcher addresses. */
100 if (index >= SWITCHER_PGD_INDEX) {
101 kill_guest(cpu, "attempt to access switcher pages");
105 /* Return a pointer to the index'th pgd entry for the i'th page table. */
106 return &cpu->lg->pgdirs[i].pgdir[index];
109 #ifdef CONFIG_X86_PAE
111 * This routine then takes the PGD entry given above, which contains the
112 * address of the PMD page. It then returns a pointer to the PMD entry for the given address.
115 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
117 unsigned int index = pmd_index(vaddr);
120 /* We kill any Guest trying to touch the Switcher addresses. */
121 if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
122 index >= SWITCHER_PMD_INDEX) {
123 kill_guest(cpu, "attempt to access switcher pages");
127 /* You should never call this if the PGD entry wasn't valid */
128 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
129 page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
136 * This routine then takes the page directory entry returned above, which
137 * contains the address of the page table entry (PTE) page. It then returns a
138 * pointer to the PTE entry for the given address.
140 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
142 #ifdef CONFIG_X86_PAE
143 pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
144 pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
146 /* You should never call this if the PMD entry wasn't valid */
147 BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
149 pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
150 /* You should never call this if the PGD entry wasn't valid */
151 BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
154 return &page[pte_index(vaddr)];
158 * These two functions are just like the above two, except they access the Guest
159 * page tables. Hence they return a Guest address.
161 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
163 unsigned int index = vaddr >> (PGDIR_SHIFT);
164 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
167 #ifdef CONFIG_X86_PAE
168 static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
170 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
171 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
172 return gpage + pmd_index(vaddr) * sizeof(pmd_t);
175 static unsigned long gpte_addr(struct lg_cpu *cpu,
176 pmd_t gpmd, unsigned long vaddr)
178 unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
180 BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
181 return gpage + pte_index(vaddr) * sizeof(pte_t);
184 static unsigned long gpte_addr(struct lg_cpu *cpu,
185 pgd_t gpgd, unsigned long vaddr)
187 unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
189 BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
190 return gpage + pte_index(vaddr) * sizeof(pte_t);
196 * get_pfn is slow: we could probably try to grab batches of pages here as
197 * an optimization (ie. pre-faulting).
201 * This routine takes a page number given by the Guest and converts it to
202 * an actual, physical page number. It can fail for several reasons: the
203 * virtual address might not be mapped by the Launcher, the write flag is set
204 * and the page is read-only, or the write flag was set and the page was
205 * shared so had to be copied, but we ran out of memory.
207 * This holds a reference to the page, so release_pte() is careful to put that back.
210 static unsigned long get_pfn(unsigned long virtpfn, int write)
214 /* gup me one page at this address please! */
215 if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
216 return page_to_pfn(page);
218 /* This value indicates failure. */
223 * Converting a Guest page table entry to a shadow (ie. real) page table
224 * entry can be a little tricky. The flags are (almost) the same, but the
225 * Guest PTE contains a virtual page number: the CPU needs the real page number.
228 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
230 unsigned long pfn, base, flags;
233 * The Guest sets the global flag, because it thinks that it is using
234 * PGE. We only told it to use PGE so it would tell us whether it was
235 * flushing a kernel mapping or a userspace mapping. We don't actually
236 * use the global bit, so throw it away.
238 flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
240 /* The Guest's pages are offset inside the Launcher. */
241 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
244 * We need a temporary "unsigned long" variable to hold the answer from
245 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
246 * fit in spte.pfn. get_pfn() finds the real physical number of the
247 * page, given the virtual number.
249 pfn = get_pfn(base + pte_pfn(gpte), write);
251 kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
253 * When we destroy the Guest, we'll go through the shadow page
254 * tables and release_pte() them. Make sure we don't think this one is valid!
259 /* Now we assemble our shadow PTE from the page number and flags. */
260 return pfn_pte(pfn, __pgprot(flags));
263 /*H:460 And to complete the chain, release_pte() looks like this: */
264 static void release_pte(pte_t pte)
267 * Remember that get_user_pages_fast() took a reference to the page, in
268 * get_pfn()? We have to put it back now.
270 if (pte_flags(pte) & _PAGE_PRESENT)
271 put_page(pte_page(pte));
275 static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
277 if ((pte_flags(gpte) & _PAGE_PSE) ||
278 pte_pfn(gpte) >= cpu->lg->pfn_limit)
279 kill_guest(cpu, "bad page table entry");
282 static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
284 if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
285 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
286 kill_guest(cpu, "bad page directory entry");
289 #ifdef CONFIG_X86_PAE
290 static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
292 if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
293 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
294 kill_guest(cpu, "bad page middle directory entry");
299 * (i) Looking up a page table entry when the Guest faults.
301 * We saw this call in run_guest(): when we see a page fault in the Guest, we
302 * come here. That's because we only set up the shadow page tables lazily as
303 * they're needed, so we get page faults all the time and quietly fix them up
304 * and return to the Guest without it knowing.
306 * If we fixed up the fault (ie. we mapped the address), this routine returns
307 * true. Otherwise, it was a real fault and we need to tell the Guest.
309 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
313 unsigned long gpte_ptr;
317 #ifdef CONFIG_X86_PAE
322 /* First step: get the top-level Guest page table entry. */
323 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
324 /* Toplevel not present? We can't map it in. */
325 if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
328 /* Now look at the matching shadow entry. */
329 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
330 if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
331 /* No shadow entry: allocate a new shadow PTE page. */
332 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
334 * This is not really the Guest's fault, but killing it is
335 * simple for this corner case.
338 kill_guest(cpu, "out of memory allocating pte page");
341 /* We check that the Guest pgd is OK. */
342 check_gpgd(cpu, gpgd);
344 * And we copy the flags to the shadow PGD entry. The page
345 * number in the shadow PGD is the page we just allocated.
347 set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
350 #ifdef CONFIG_X86_PAE
351 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
352 /* Middle level not present? We can't map it in. */
353 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
356 /* Now look at the matching shadow entry. */
357 spmd = spmd_addr(cpu, *spgd, vaddr);
359 if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
360 /* No shadow entry: allocate a new shadow PTE page. */
361 unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
364 * This is not really the Guest's fault, but killing it is
365 * simple for this corner case.
368 kill_guest(cpu, "out of memory allocating pte page");
372 /* We check that the Guest pmd is OK. */
373 check_gpmd(cpu, gpmd);
376 * And we copy the flags to the shadow PMD entry. The page
377 * number in the shadow PMD is the page we just allocated.
379 native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
383 * OK, now we look at the lower level in the Guest page table: keep its
384 * address, because we might update it later.
386 gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
389 * OK, now we look at the lower level in the Guest page table: keep its
390 * address, because we might update it later.
392 gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
394 gpte = lgread(cpu, gpte_ptr, pte_t);
396 /* If this page isn't in the Guest page tables, we can't page it in. */
397 if (!(pte_flags(gpte) & _PAGE_PRESENT))
401 * Check they're not trying to write to a page the Guest wants
402 * read-only (bit 2 of errcode == write).
404 if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
407 /* User access to a kernel-only page? (bit 3 == user access) */
408 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
412 * Check that the Guest PTE flags are OK, and the page number is below
413 * the pfn_limit (ie. not mapping the Launcher binary).
415 check_gpte(cpu, gpte);
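	/*
	 * For reference, the standard x86 page-fault error code (assumed
	 * knowledge here, not defined in this file): bit 0 (value 1) means
	 * the page was present, bit 1 (value 2) means the access was a
	 * write, and bit 2 (value 4) means it came from user mode -- which
	 * is why the tests above mask errcode with 2 and 4.
	 */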
417 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
418 gpte = pte_mkyoung(gpte);
420 gpte = pte_mkdirty(gpte);
422 /* Get the pointer to the shadow PTE entry we're going to set. */
423 spte = spte_addr(cpu, *spgd, vaddr);
426 * If there was a valid shadow PTE entry here before, we release it.
427 * This can happen with a write to a previously read-only entry.
432 * If this is a write, we insist that the Guest page is writable (the
433 * final arg to gpte_to_spte()).
436 *spte = gpte_to_spte(cpu, gpte, 1);
439 * If this is a read, don't set the "writable" bit in the page
440 * table entry, even if the Guest says it's writable. That way
441 * we will come back here when a write does actually occur, so
442 * we can update the Guest's _PAGE_DIRTY flag.
444 native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
447 * Finally, we write the Guest PTE entry back: we've set the
448 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
450 lgwrite(cpu, gpte_ptr, pte_t, gpte);
453 * The fault is fixed, the page table is populated, the mapping
454 * manipulated, the result returned and the code complete. A small
455 * delay and a trace of alliteration are the only indications the Guest
456 * has that a page fault occurred at all.
462 * (ii) Making sure the Guest stack is mapped.
464 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
465 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
466 * we've seen that logic is quite long, and usually the stack pages are already
467 * mapped, so it's overkill.
469 * This is a quick version which answers the question: is this virtual address
470 * mapped by the shadow page tables, and is it writable?
472 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
477 #ifdef CONFIG_X86_PAE
480 /* Look at the current top level entry: is it present? */
481 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
482 if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
485 #ifdef CONFIG_X86_PAE
486 spmd = spmd_addr(cpu, *spgd, vaddr);
487 if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
492 * Check the flags on the pte entry itself: it must be present and
495 flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
497 return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
501 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
502 * in the page tables, and if not, we call demand_page() with error code 2 (meaning "write").
505 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
507 if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
508 kill_guest(cpu, "bad stack page %#lx", vaddr);
511 #ifdef CONFIG_X86_PAE
512 static void release_pmd(pmd_t *spmd)
514 /* If the entry's not present, there's nothing to release. */
515 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
517 pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
518 /* For each entry in the page, we might need to release it. */
519 for (i = 0; i < PTRS_PER_PTE; i++)
520 release_pte(ptepage[i]);
521 /* Now we can free the page of PTEs */
522 free_page((long)ptepage);
523 /* And zero out the PMD entry so we never release it twice. */
524 native_set_pmd(spmd, __pmd(0));
528 static void release_pgd(pgd_t *spgd)
530 /* If the entry's not present, there's nothing to release. */
531 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
533 pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
535 for (i = 0; i < PTRS_PER_PMD; i++)
536 release_pmd(&pmdpage[i]);
538 /* Now we can free the page of PMDs */
539 free_page((long)pmdpage);
540 /* And zero out the PGD entry so we never release it twice. */
541 set_pgd(spgd, __pgd(0));
545 #else /* !CONFIG_X86_PAE */
546 /*H:450 If we chase down the release_pgd() code, it looks like this: */
547 static void release_pgd(pgd_t *spgd)
549 /* If the entry's not present, there's nothing to release. */
550 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
553 * Converting the pfn to find the actual PTE page is easy: turn
554 * the page number into a physical address, then convert to a
555 * virtual address (easy for kernel pages like this one).
557 pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
558 /* For each entry in the page, we might need to release it. */
559 for (i = 0; i < PTRS_PER_PTE; i++)
560 release_pte(ptepage[i]);
561 /* Now we can free the page of PTEs */
562 free_page((long)ptepage);
563 /* And zero out the PGD entry so we never release it twice. */
570 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
571 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
572 * It simply releases every PTE page from 0 up to the Guest's kernel address.
574 static void flush_user_mappings(struct lguest *lg, int idx)
577 /* Release every pgd entry up to the kernel's address. */
578 for (i = 0; i < pgd_index(lg->kernel_address); i++)
579 release_pgd(lg->pgdirs[idx].pgdir + i);
583 * (v) Flushing (throwing away) page tables,
585 * The Guest has a hypercall to throw away the page tables: it's used when a
586 * large number of mappings have been changed.
588 void guest_pagetable_flush_user(struct lg_cpu *cpu)
590 /* Drop the userspace part of the current page table. */
591 flush_user_mappings(cpu->lg, cpu->cpu_pgd);
595 /* We walk down the guest page tables to get a guest-physical address */
596 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
600 #ifdef CONFIG_X86_PAE
603 /* First step: get the top-level Guest page table entry. */
604 gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
605 /* Toplevel not present? We can't map it in. */
606 if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
607 kill_guest(cpu, "Bad address %#lx", vaddr);
611 #ifdef CONFIG_X86_PAE
612 gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
613 if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
614 kill_guest(cpu, "Bad address %#lx", vaddr);
615 gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
617 gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
619 if (!(pte_flags(gpte) & _PAGE_PRESENT))
620 kill_guest(cpu, "Bad address %#lx", vaddr);
622 return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
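/*
 * A worked example of that last line (assuming the usual 4096-byte pages):
 * if the Guest PTE holds frame number 0x1A2 and vaddr is 0xC0123456, the
 * guest-physical address is 0x1A2 * 0x1000 | 0x456 = 0x001A2456.
 */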
626 * We keep several page tables. This is a simple routine to find the page
627 * table (if any) corresponding to this top-level address the Guest has given
630 static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
633 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
634 if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
640 * And this is us, creating the new page directory. If we really do
642 * allocate a new one (and so the kernel parts are not there), we set blank_pgdir.
644 static unsigned int new_pgdir(struct lg_cpu *cpu,
645 unsigned long gpgdir,
649 #ifdef CONFIG_X86_PAE
654 * We pick one entry at random to throw out. Choosing the Least
655 * Recently Used might be better, but this is easy.
657 next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
658 /* If it's never been allocated at all before, try now. */
659 if (!cpu->lg->pgdirs[next].pgdir) {
660 cpu->lg->pgdirs[next].pgdir =
661 (pgd_t *)get_zeroed_page(GFP_KERNEL);
662 /* If the allocation fails, just keep using the one we have */
663 if (!cpu->lg->pgdirs[next].pgdir)
666 #ifdef CONFIG_X86_PAE
668 * In PAE mode, allocate a pmd page and populate the last pgd entry.
671 pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
673 free_page((long)cpu->lg->pgdirs[next].pgdir);
674 set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
677 set_pgd(cpu->lg->pgdirs[next].pgdir +
679 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
681 * This is a blank page, so there are no kernel
682 * mappings: caller must map the stack!
691 /* Record which Guest toplevel this shadows. */
692 cpu->lg->pgdirs[next].gpgdir = gpgdir;
693 /* Release all the non-kernel mappings. */
694 flush_user_mappings(cpu->lg, next);
700 * (iv) Switching page tables
702 * Now we've seen all the page table setting and manipulation, let's see
703 * what happens when the Guest changes page tables (ie. changes the top-level
704 * pgdir). This occurs on almost every context switch.
706 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
708 int newpgdir, repin = 0;
710 /* Look to see if we have this one already. */
711 newpgdir = find_pgdir(cpu->lg, pgtable);
713 * If not, we allocate or mug an existing one: if it's a fresh one,
714 * repin gets set to 1.
716 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
717 newpgdir = new_pgdir(cpu, pgtable, &repin);
718 /* Change the current pgd index to the new one. */
719 cpu->cpu_pgd = newpgdir;
720 /* If it was completely blank, we map in the Guest kernel stack */
722 pin_stack_pages(cpu);
726 * Finally, a routine which throws away everything: all PGD entries in all
727 * the shadow page tables, including the Guest's kernel mappings. This is used
728 * when we destroy the Guest.
730 static void release_all_pagetables(struct lguest *lg)
734 /* Every shadow pagetable this Guest has */
735 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
736 if (lg->pgdirs[i].pgdir) {
737 #ifdef CONFIG_X86_PAE
742 /* Get the last pmd page. */
743 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
744 pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
747 * And release the pmd entries of that pmd page,
748 * except for the switcher pmd.
750 for (k = 0; k < SWITCHER_PMD_INDEX; k++)
751 release_pmd(&pmdpage[k]);
753 /* Every PGD entry except the Switcher at the top */
754 for (j = 0; j < SWITCHER_PGD_INDEX; j++)
755 release_pgd(lg->pgdirs[i].pgdir + j);
760 * We also throw away everything when a Guest tells us it's changed a kernel
761 * mapping. Since kernel mappings are in every page table, it's easiest to
762 * throw them all away. This traps the Guest in amber for a while as
763 * everything faults back in, but it's rare.
765 void guest_pagetable_clear_all(struct lg_cpu *cpu)
767 release_all_pagetables(cpu->lg);
768 /* We need the Guest kernel stack mapped again. */
769 pin_stack_pages(cpu);
774 * Since we throw away all mappings when a kernel mapping changes, our
775 * performance sucks for guests using highmem. In fact, a guest with
776 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
777 * usually slower than a Guest with less memory.
779 * This, of course, cannot be fixed. It would take some kind of... well, I
780 * don't know, but the term "puissant code-fu" comes to mind.
784 * This is the routine which actually sets the page table entry for the
785 * "idx"'th shadow page table.
787 * Normally, we can just throw out the old entry and replace it with 0: if they
788 * use it demand_page() will put the new entry in. We need to do this anyway:
789 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
790 * is read from, and _PAGE_DIRTY when it's written to.
792 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
793 * these bits on PTEs immediately anyway. This is done to save the CPU from
794 * having to update them, but it helps us the same way: if they set
795 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
796 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
798 static void do_set_pte(struct lg_cpu *cpu, int idx,
799 unsigned long vaddr, pte_t gpte)
801 /* Look up the matching shadow page directory entry. */
802 pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
803 #ifdef CONFIG_X86_PAE
807 /* If the top level isn't present, there's no entry to update. */
808 if (pgd_flags(*spgd) & _PAGE_PRESENT) {
809 #ifdef CONFIG_X86_PAE
810 spmd = spmd_addr(cpu, *spgd, vaddr);
811 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
813 /* Otherwise, start by releasing the existing entry. */
814 pte_t *spte = spte_addr(cpu, *spgd, vaddr);
818 * If they're setting this entry as dirty or accessed,
819 * we might as well put that entry they've given us in
820 * now. This shaves 10% off a copy-on-write micro-benchmark.
823 if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
824 check_gpte(cpu, gpte);
826 gpte_to_spte(cpu, gpte,
827 pte_flags(gpte) & _PAGE_DIRTY));
830 * Otherwise kill it and we can demand_page() it in later.
833 native_set_pte(spte, __pte(0));
835 #ifdef CONFIG_X86_PAE
842 * Updating a PTE entry is a little trickier.
844 * We keep track of several different page tables (the Guest uses one for each
845 * process, so it makes sense to cache at least a few). Each of these has
846 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
847 * all processes. So when the page table above that address changes, we update
848 * all the page tables, not just the current one. This is rare.
850 * The benefit is that when we have to track a new page table, we can keep all
851 * the kernel mappings. This speeds up context switch immensely.
853 void guest_set_pte(struct lg_cpu *cpu,
854 unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
857 * Kernel mappings must be changed on all top levels. Slow, but doesn't
860 if (vaddr >= cpu->lg->kernel_address) {
862 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
863 if (cpu->lg->pgdirs[i].pgdir)
864 do_set_pte(cpu, i, vaddr, gpte);
866 /* Is this page table one we have a shadow for? */
867 int pgdir = find_pgdir(cpu->lg, gpgdir);
868 if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
869 /* If so, do the update. */
870 do_set_pte(cpu, pgdir, vaddr, gpte);
875 * (iii) Setting up a page table entry when the Guest tells us one has changed.
877 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
878 * with the other side of page tables while we're here: what happens when the
879 * Guest asks for a page table to be updated?
881 * We already saw that demand_page() will fill in the shadow page tables when
882 * needed, so we can simply remove shadow page table entries whenever the Guest
883 * tells us they've changed. When the Guest tries to use the new entry it will
884 * fault and demand_page() will fix it up.
886 * So with that in mind, here's our code to update a (top-level) PGD entry:
888 void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
892 if (idx >= SWITCHER_PGD_INDEX)
895 /* If they're talking about a page table we have a shadow for... */
896 pgdir = find_pgdir(lg, gpgdir);
897 if (pgdir < ARRAY_SIZE(lg->pgdirs))
898 /* ... throw it away. */
899 release_pgd(lg->pgdirs[pgdir].pgdir + idx);
901 #ifdef CONFIG_X86_PAE
902 void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
904 guest_pagetable_clear_all(&lg->cpus[0]);
909 * Once we know how much memory we have, we can construct simple identity (which
910 * set virtual == physical) and linear mappings which will get the Guest far
911 * enough into the boot to create its own.
913 * We lay them out of the way, just below the initrd (which is why we need to
914 * know its size here).
916 static unsigned long setup_pagetables(struct lguest *lg,
918 unsigned long initrd_size)
921 pte_t __user *linear;
922 unsigned long mem_base = (unsigned long)lg->mem_base;
923 unsigned int mapped_pages, i, linear_pages;
924 #ifdef CONFIG_X86_PAE
930 unsigned int phys_linear;
934 * We have mapped_pages frames to map, so we need linear_pages page
935 * tables to map them.
937 mapped_pages = mem / PAGE_SIZE;
938 linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
940 /* We put the toplevel page directory page at the top of memory. */
941 pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);
943 /* Now we use the next linear_pages pages as pte pages */
944 linear = (void *)pgdir - linear_pages * PAGE_SIZE;
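	/*
	 * To make that concrete (a sketch with illustrative numbers): a 64MB
	 * Guest has 16384 mapped_pages; with 1024 PTEs per page table page
	 * (non-PAE) that needs 16 linear PTE pages, twice that under PAE.
	 * Working upwards, memory ends up laid out roughly as:
	 *
	 *   ...Guest RAM... | (PAE: pmd page) | linear PTE pages | pgdir | initrd | top
	 */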
946 #ifdef CONFIG_X86_PAE
947 pmds = (void *)linear - PAGE_SIZE;
950 * Linear mapping is easy: put every page's address into the mapping in order.
953 for (i = 0; i < mapped_pages; i++) {
955 pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
956 if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
961 * The top level points to the linear page table pages above.
962 * We set up the identity and linear mappings here.
964 #ifdef CONFIG_X86_PAE
965 for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
966 i += PTRS_PER_PTE, j++) {
967 native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
968 - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
970 if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
974 set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
975 if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
977 if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
980 phys_linear = (unsigned long)linear - mem_base;
981 for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
983 pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
984 (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
986 if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
987 || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
995 * We return the top level (guest-physical) address: remember where this is to write it into lguest_data when the Guest initializes.
998 return (unsigned long)pgdir - mem_base;
1002 * (vii) Setting up the page tables initially.
1004 * When a Guest is first created, the Launcher tells us where the toplevel of
1005 * its first page table is. We set some things up here:
1007 int init_guest_pagetable(struct lguest *lg)
1011 struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
1012 #ifdef CONFIG_X86_PAE
1017 * Get the Guest memory size and the ramdisk size from the boot header
1018 * located at lg->mem_base (Guest address 0).
1020 if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
1021 || get_user(initrd_size, &boot->hdr.ramdisk_size))
1025 * We start on the first shadow page table, and give it a blank PGD page.
1028 lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
1029 if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
1030 return lg->pgdirs[0].gpgdir;
1031 lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
1032 if (!lg->pgdirs[0].pgdir)
1034 #ifdef CONFIG_X86_PAE
1035 pgd = lg->pgdirs[0].pgdir;
1036 pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
1040 set_pgd(pgd + SWITCHER_PGD_INDEX,
1041 __pgd(__pa(pmd_table) | _PAGE_PRESENT));
1043 lg->cpus[0].cpu_pgd = 0;
1047 /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1048 void page_table_guest_data_init(struct lg_cpu *cpu)
1050 /* We get the kernel address: above this is all kernel memory. */
1051 if (get_user(cpu->lg->kernel_address,
1052 &cpu->lg->lguest_data->kernel_address)
1054 * We tell the Guest that it can't use the top 2 or 4 MB
1055 * of virtual addresses used by the Switcher.
1057 || put_user(RESERVE_MEM * 1024 * 1024,
1058 &cpu->lg->lguest_data->reserve_mem)
1059 || put_user(cpu->lg->pgdirs[0].gpgdir,
1060 &cpu->lg->lguest_data->pgdir))
1061 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
1064 * In flush_user_mappings() we loop from 0 to
1065 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
1066 * Switcher mappings, so check that now.
1068 #ifdef CONFIG_X86_PAE
1069 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
1070 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
1072 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
1074 kill_guest(cpu, "bad kernel address %#lx",
1075 cpu->lg->kernel_address);
1078 /* When a Guest dies, our cleanup is fairly simple. */
1079 void free_guest_pagetable(struct lguest *lg)
1083 /* Throw away all page table pages. */
1084 release_all_pagetables(lg);
1085 /* Now free the top levels: free_page() can handle 0 just fine. */
1086 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
1087 free_page((long)lg->pgdirs[i].pgdir);
1091 * (vi) Mapping the Switcher when the Guest is about to run.
1093 * The Switcher and the two pages for this CPU need to be visible in the
1094 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
1095 * for each CPU already set up; we just need to hook them in now that we know which
1096 * Guest is about to run on this CPU.
1098 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1100 pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
1104 #ifdef CONFIG_X86_PAE
1108 native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
1109 PAGE_SHIFT, PAGE_KERNEL_EXEC));
1111 pmd_table = __va(pgd_pfn(cpu->lg->
1112 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
1114 native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
1119 * Make the last PGD entry for this Guest point to the Switcher's PTE
1120 * page for this CPU (with appropriate flags).
1122 switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
1124 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
1128 * We also change the Switcher PTE page. When we're running the Guest,
1129 * we want the Guest's "regs" page to appear where the first Switcher
1130 * page for this CPU is. This is an optimization: when the Switcher
1131 * saves the Guest registers, it saves them into the first page of this
1132 * CPU's "struct lguest_pages": if we make sure the Guest's register
1133 * page is already mapped there, we don't have to copy them out afterwards.
1136 pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
1137 native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
1138 native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
1143 static void free_switcher_pte_pages(void)
1147 for_each_possible_cpu(i)
1148 free_page((long)switcher_pte_page(i));
1152 * Setting up the Switcher PTE page for a given CPU is fairly easy, given
1153 * the CPU number and the "struct page"s for the Switcher code itself.
1155 * Currently the Switcher is less than a page long, so "pages" is always 1.
1157 static __init void populate_switcher_pte_page(unsigned int cpu,
1158 struct page *switcher_page[],
1162 pte_t *pte = switcher_pte_page(cpu);
1164 /* The first entries are easy: they map the Switcher code. */
1165 for (i = 0; i < pages; i++) {
1166 native_set_pte(&pte[i], mk_pte(switcher_page[i],
1167 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
1170 /* The only other thing we map is this CPU's pair of pages. */
1173 /* First page (Guest registers) is writable from the Guest */
1174 native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
1175 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));
1178 * The second page contains the "struct lguest_ro_state", and is read-only.
1181 native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
1182 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
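/*
 * Putting that together, each CPU's Switcher PTE page roughly ends up as
 * (a sketch for the usual case of a single Switcher code page):
 *
 *   pte[0]    -> the Switcher code page (present, accessed, read-only)
 *   pte[i]    -> this CPU's "struct lguest_pages" page 0: the Guest register
 *                page, writable by the Guest
 *   pte[i+1]  -> page 1, the "struct lguest_ro_state", read-only
 *
 * where i is the slot reserved for this CPU's pair of pages, as the loop
 * and the two mappings above set them.
 */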
1186 * We've made it through the page table code. Perhaps our tired brains are
1187 * still processing the details, or perhaps we're simply glad it's over.
1189 * If nothing else, note that all this complexity in juggling shadow page tables
1190 * in sync with the Guest's page tables is for one reason: for most Guests this
1191 * page table dance determines how bad performance will be. This is why Xen
1192 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
1193 * have implemented shadow page table support directly into hardware.
1195 * There is just one file remaining in the Host.
1199 * At boot or module load time, init_pagetables() allocates and populates
1200 * the Switcher PTE page for each CPU.
1202 __init int init_pagetables(struct page **switcher_page, unsigned int pages)
1206 for_each_possible_cpu(i) {
1207 switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
1208 if (!switcher_pte_page(i)) {
1209 free_switcher_pte_pages();
1212 populate_switcher_pte_page(i, switcher_page, pages);
1218 /* Cleaning up simply involves freeing the PTE page for each CPU. */
1219 void free_pagetables(void)
1221 free_switcher_pte_pages();