/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

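/*
 * Helpers for the small cache of pre-allocated page-table pages. The cache
 * is topped up before taking the mmu_lock and consumed while holding it, so
 * table allocations never have to sleep inside the spinlock.
 */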
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

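/*
 * The clear_*_entry helpers tear down a single entry at the given level,
 * flush the stage-2 TLB for that IPA, and drop the reference that was taken
 * on the containing table page when the entry was installed.
 */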
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	if (pud_huge(*pud)) {
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pmd_t *pmd_table = pmd_offset(pud, 0);
		pud_clear(pud);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pmd_free(NULL, pmd_table);
	}
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	if (kvm_pmd_huge(*pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pte_t *pte_table = pte_offset_kernel(pmd, 0);
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pte_free_kernel(NULL, pte_table);
	}
	put_page(virt_to_page(pmd));
}

static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	}
}

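/*
 * Walk a range of guest IPAs (or HYP VAs when kvm == NULL) and remove any
 * mappings found, freeing intermediate tables once their refcount shows
 * they no longer contain any entries.
 */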
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		pte = NULL;
		if (pud_none(*pud)) {
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = kvm_pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder
		 */
		if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
			clear_pmd_entry(kvm, pmd, addr);
			next = kvm_pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = kvm_pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}

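/*
 * The stage2_flush_* family walks the stage-2 tables and cleans the data
 * cache to the point of coherency for every mapped page, using the
 * userspace alias (hva) of the guest memory.
 */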
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
			kvm_flush_dcache_to_poc((void *)hva, PAGE_SIZE);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PMD_SIZE);
			} else {
				stage2_flush_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
				kvm_flush_dcache_to_poc((void *)hva, PUD_SIZE);
			} else {
				stage2_flush_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		kfree(boot_hyp_pgd);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	kfree(init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void *)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		kfree(hyp_pgd);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}

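/*
 * The create_hyp_*_mappings helpers populate the HYP page tables one level
 * at a time; each newly installed table entry is cleaned to the PoC so it is
 * visible to the page-table walker before the HYP caches are up.
 */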
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

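/* Translate a kernel VA (linear map or vmalloc/ioremap) to a physical address. */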
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, virt_addr,
					    virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm_clean_pgd(pgd);
	kvm->arch.pgd = pgd;

	return 0;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}

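/*
 * Walk (and, when a cache is provided, allocate) the stage-2 tables down to
 * the PMD level for the given guest IPA.
 */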
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}

static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault. If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should instead be
	 * split first, unmapped, merged, and mapped back in on-demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));

	return 0;
}

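/*
 * Install a single stage-2 PTE, allocating the missing table level from the
 * memory cache if needed. With iomap set, an already-present PTE is treated
 * as an error rather than silently replaced.
 */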
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Level 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}

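/*
 * Illustrative use only (the arguments below are made up): a caller wanting
 * to expose a 4K device region to the guest would do something like
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_ipa, dev_base, SZ_4K);
 *
 * which installs device-type (PAGE_S2_DEVICE) mappings page by page.
 */
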
static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page. However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page. We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}

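/*
 * Handle a stage-2 fault on memory backed by a memslot: pin the page with
 * gfn_to_pfn_prot(), decide between a PMD block mapping (hugetlbfs or THP)
 * and a regular PTE, clean the cache for the new mapping, and install it
 * under the mmu_lock.
 */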
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

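/*
 * MMU notifier plumbing: convert a host VA range into the guest physical
 * ranges it backs (via the memslots) and invoke the handler on each page.
 */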
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

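/*
 * One-time set-up of the HYP page tables: idmap the init code (via a bounce
 * page if it crosses a page boundary) and map the trampoline page into both
 * the boot and the runtime tables.
 */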
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = kvm_virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}