88203fa4ef0527a578fd113de12a24825603675e
[firefly-linux-kernel-4.4.55.git] arch/x86/kvm/mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11  *
12  * Authors:
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Avi Kivity   <avi@qumranet.com>
15  *
16  * This work is licensed under the terms of the GNU GPL, version 2.  See
17  * the COPYING file in the top-level directory.
18  *
19  */
20
21 #include "mmu.h"
22 #include "x86.h"
23 #include "kvm_cache_regs.h"
24
25 #include <linux/kvm_host.h>
26 #include <linux/types.h>
27 #include <linux/string.h>
28 #include <linux/mm.h>
29 #include <linux/highmem.h>
30 #include <linux/module.h>
31 #include <linux/swap.h>
32 #include <linux/hugetlb.h>
33 #include <linux/compiler.h>
34 #include <linux/srcu.h>
35 #include <linux/slab.h>
36 #include <linux/uaccess.h>
37
38 #include <asm/page.h>
39 #include <asm/cmpxchg.h>
40 #include <asm/io.h>
41 #include <asm/vmx.h>
42
43 /*
44  * When this variable is set to true, it enables Two-Dimensional Paging,
45  * where the hardware walks 2 page tables:
46  * 1. the guest-virtual to guest-physical translation, and
47  * 2. while doing 1., the guest-physical to host-physical translation.
48  * If the hardware supports that, we don't need to do shadow paging.
49  */
50 bool tdp_enabled = false;
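/*
 * Note: tdp_enabled is flipped at init time by the vendor modules via
 * kvm_enable_tdp()/kvm_disable_tdp() (defined later in this file),
 * depending on whether EPT/NPT was detected.
 */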
51
52 enum {
53         AUDIT_PRE_PAGE_FAULT,
54         AUDIT_POST_PAGE_FAULT,
55         AUDIT_PRE_PTE_WRITE,
56         AUDIT_POST_PTE_WRITE
57 };
58
59 char *audit_point_name[] = {
60         "pre page fault",
61         "post page fault",
62         "pre pte write",
63         "post pte write"
64 };
65
66 #undef MMU_DEBUG
67
68 #ifdef MMU_DEBUG
69
70 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
71 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
72
73 #else
74
75 #define pgprintk(x...) do { } while (0)
76 #define rmap_printk(x...) do { } while (0)
77
78 #endif
79
80 #ifdef MMU_DEBUG
81 static bool dbg = false;
82 module_param(dbg, bool, 0644);
83 #endif
84
85 static bool oos_shadow = true;
86 module_param(oos_shadow, bool, 0644);
87
88 #ifndef MMU_DEBUG
89 #define ASSERT(x) do { } while (0)
90 #else
91 #define ASSERT(x)                                                       \
92         if (!(x)) {                                                     \
93                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
94                        __FILE__, __LINE__, #x);                         \
95         }
96 #endif
97
98 #define PTE_PREFETCH_NUM                8
99
100 #define PT_FIRST_AVAIL_BITS_SHIFT 9
101 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
102
103 #define PT64_LEVEL_BITS 9
104
105 #define PT64_LEVEL_SHIFT(level) \
106                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
107
108 #define PT64_LEVEL_MASK(level) \
109                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
110
111 #define PT64_INDEX(address, level)\
112         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
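/*
 * Worked example: with PAGE_SHIFT == 12, PT64_LEVEL_SHIFT(1) == 12,
 * PT64_LEVEL_SHIFT(2) == 21 and PT64_LEVEL_SHIFT(3) == 30, so
 * PT64_INDEX(addr, 2) extracts bits 21..29 of addr - the index into
 * the 512-entry page directory.
 */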
113
114
115 #define PT32_LEVEL_BITS 10
116
117 #define PT32_LEVEL_SHIFT(level) \
118                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
119
120 #define PT32_LEVEL_MASK(level) \
121                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
122 #define PT32_LVL_OFFSET_MASK(level) \
123         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
124                                                 * PT32_LEVEL_BITS))) - 1))
125
126 #define PT32_INDEX(address, level)\
127         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
128
129
130 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
131 #define PT64_DIR_BASE_ADDR_MASK \
132         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
133 #define PT64_LVL_ADDR_MASK(level) \
134         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
135                                                 * PT64_LEVEL_BITS))) - 1))
136 #define PT64_LVL_OFFSET_MASK(level) \
137         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
138                                                 * PT64_LEVEL_BITS))) - 1))
139
140 #define PT32_BASE_ADDR_MASK PAGE_MASK
141 #define PT32_DIR_BASE_ADDR_MASK \
142         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
143 #define PT32_LVL_ADDR_MASK(level) \
144         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
145                                             * PT32_LEVEL_BITS))) - 1))
146
147 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
148                         | PT64_NX_MASK)
149
150 #define RMAP_EXT 4
151
152 #define ACC_EXEC_MASK    1
153 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
154 #define ACC_USER_MASK    PT_USER_MASK
155 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
156
157 #include <trace/events/kvm.h>
158
159 #define CREATE_TRACE_POINTS
160 #include "mmutrace.h"
161
162 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
163
164 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
165
166 struct kvm_rmap_desc {
167         u64 *sptes[RMAP_EXT];
168         struct kvm_rmap_desc *more;
169 };
170
171 struct kvm_shadow_walk_iterator {
172         u64 addr;
173         hpa_t shadow_addr;
174         int level;
175         u64 *sptep;
176         unsigned index;
177 };
178
179 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
180         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
181              shadow_walk_okay(&(_walker));                      \
182              shadow_walk_next(&(_walker)))
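/*
 * Usage sketch: visit every shadow pte on the walk for @addr, e.g.
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *
 *	for_each_shadow_entry(vcpu, addr, iterator) {
 *		if (!is_shadow_present_pte(*iterator.sptep))
 *			break;
 *	}
 */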
183
184 typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
185
186 static struct kmem_cache *pte_chain_cache;
187 static struct kmem_cache *rmap_desc_cache;
188 static struct kmem_cache *mmu_page_header_cache;
189 static struct percpu_counter kvm_total_used_mmu_pages;
190
191 static u64 __read_mostly shadow_trap_nonpresent_pte;
192 static u64 __read_mostly shadow_notrap_nonpresent_pte;
193 static u64 __read_mostly shadow_base_present_pte;
194 static u64 __read_mostly shadow_nx_mask;
195 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
196 static u64 __read_mostly shadow_user_mask;
197 static u64 __read_mostly shadow_accessed_mask;
198 static u64 __read_mostly shadow_dirty_mask;
199
200 static inline u64 rsvd_bits(int s, int e)
201 {
202         return ((1ULL << (e - s + 1)) - 1) << s;
203 }
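/* e.g. rsvd_bits(36, 51) builds a mask with bits 36..51 set. */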
204
205 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
206 {
207         shadow_trap_nonpresent_pte = trap_pte;
208         shadow_notrap_nonpresent_pte = notrap_pte;
209 }
210 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
211
212 void kvm_mmu_set_base_ptes(u64 base_pte)
213 {
214         shadow_base_present_pte = base_pte;
215 }
216 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
217
218 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
219                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
220 {
221         shadow_user_mask = user_mask;
222         shadow_accessed_mask = accessed_mask;
223         shadow_dirty_mask = dirty_mask;
224         shadow_nx_mask = nx_mask;
225         shadow_x_mask = x_mask;
226 }
227 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
228
229 static bool is_write_protection(struct kvm_vcpu *vcpu)
230 {
231         return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
232 }
233
234 static int is_cpuid_PSE36(void)
235 {
236         return 1;
237 }
238
239 static int is_nx(struct kvm_vcpu *vcpu)
240 {
241         return vcpu->arch.efer & EFER_NX;
242 }
243
244 static int is_shadow_present_pte(u64 pte)
245 {
246         return pte != shadow_trap_nonpresent_pte
247                 && pte != shadow_notrap_nonpresent_pte;
248 }
249
250 static int is_large_pte(u64 pte)
251 {
252         return pte & PT_PAGE_SIZE_MASK;
253 }
254
255 static int is_writable_pte(unsigned long pte)
256 {
257         return pte & PT_WRITABLE_MASK;
258 }
259
260 static int is_dirty_gpte(unsigned long pte)
261 {
262         return pte & PT_DIRTY_MASK;
263 }
264
265 static int is_rmap_spte(u64 pte)
266 {
267         return is_shadow_present_pte(pte);
268 }
269
270 static int is_last_spte(u64 pte, int level)
271 {
272         if (level == PT_PAGE_TABLE_LEVEL)
273                 return 1;
274         if (is_large_pte(pte))
275                 return 1;
276         return 0;
277 }
278
279 static pfn_t spte_to_pfn(u64 pte)
280 {
281         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
282 }
283
284 static gfn_t pse36_gfn_delta(u32 gpte)
285 {
286         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
287
288         return (gpte & PT32_DIR_PSE36_MASK) << shift;
289 }
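/*
 * Example: PT32_DIR_PSE36_MASK covers pde bits 13..16, which under
 * PSE-36 carry physical address bits 32..35 of a 4MB page; shifting
 * them left by 32 - 13 - PAGE_SHIFT (== 7) re-expresses them as gfn
 * bits 20..23.
 */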
290
291 static void __set_spte(u64 *sptep, u64 spte)
292 {
293         set_64bit(sptep, spte);
294 }
295
296 static u64 __xchg_spte(u64 *sptep, u64 new_spte)
297 {
298 #ifdef CONFIG_X86_64
299         return xchg(sptep, new_spte);
300 #else
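        /*
         * 32-bit hosts have no atomic 8-byte xchg; emulate one with a
         * cmpxchg64 retry loop so concurrent hardware walks never see
         * a torn spte.
         */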
301         u64 old_spte;
302
303         do {
304                 old_spte = *sptep;
305         } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
306
307         return old_spte;
308 #endif
309 }
310
311 static bool spte_has_volatile_bits(u64 spte)
312 {
313         if (!shadow_accessed_mask)
314                 return false;
315
316         if (!is_shadow_present_pte(spte))
317                 return false;
318
319         if ((spte & shadow_accessed_mask) &&
320               (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
321                 return false;
322
323         return true;
324 }
325
326 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
327 {
328         return (old_spte & bit_mask) && !(new_spte & bit_mask);
329 }
330
331 static void update_spte(u64 *sptep, u64 new_spte)
332 {
333         u64 mask, old_spte = *sptep;
334
335         WARN_ON(!is_rmap_spte(new_spte));
336
337         new_spte |= old_spte & shadow_dirty_mask;
338
339         mask = shadow_accessed_mask;
340         if (is_writable_pte(old_spte))
341                 mask |= shadow_dirty_mask;
342
343         if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
344                 __set_spte(sptep, new_spte);
345         else
346                 old_spte = __xchg_spte(sptep, new_spte);
347
348         if (!shadow_accessed_mask)
349                 return;
350
351         if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
352                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
353         if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
354                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
355 }
356
357 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
358                                   struct kmem_cache *base_cache, int min)
359 {
360         void *obj;
361
362         if (cache->nobjs >= min)
363                 return 0;
364         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
365                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
366                 if (!obj)
367                         return -ENOMEM;
368                 cache->objects[cache->nobjs++] = obj;
369         }
370         return 0;
371 }
372
373 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
374                                   struct kmem_cache *cache)
375 {
376         while (mc->nobjs)
377                 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
378 }
379
380 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
381                                        int min)
382 {
383         struct page *page;
384
385         if (cache->nobjs >= min)
386                 return 0;
387         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
388                 page = alloc_page(GFP_KERNEL);
389                 if (!page)
390                         return -ENOMEM;
391                 cache->objects[cache->nobjs++] = page_address(page);
392         }
393         return 0;
394 }
395
396 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
397 {
398         while (mc->nobjs)
399                 free_page((unsigned long)mc->objects[--mc->nobjs]);
400 }
401
402 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
403 {
404         int r;
405
406         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
407                                    pte_chain_cache, 4);
408         if (r)
409                 goto out;
410         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
411                                    rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
412         if (r)
413                 goto out;
414         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
415         if (r)
416                 goto out;
417         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
418                                    mmu_page_header_cache, 4);
419 out:
420         return r;
421 }
422
423 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
424 {
425         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
426         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
427         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
428         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
429                                 mmu_page_header_cache);
430 }
431
432 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
433                                     size_t size)
434 {
435         void *p;
436
437         BUG_ON(!mc->nobjs);
438         p = mc->objects[--mc->nobjs];
439         return p;
440 }
441
442 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
443 {
444         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
445                                       sizeof(struct kvm_pte_chain));
446 }
447
448 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
449 {
450         kmem_cache_free(pte_chain_cache, pc);
451 }
452
453 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
454 {
455         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
456                                       sizeof(struct kvm_rmap_desc));
457 }
458
459 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
460 {
461         kmem_cache_free(rmap_desc_cache, rd);
462 }
463
464 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
465 {
466         if (!sp->role.direct)
467                 return sp->gfns[index];
468
469         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
470 }
471
472 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
473 {
474         if (sp->role.direct)
475                 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
476         else
477                 sp->gfns[index] = gfn;
478 }
479
480 /*
481  * Return the pointer to the largepage write count for a given
482  * gfn, handling slots that are not large page aligned.
483  */
484 static int *slot_largepage_idx(gfn_t gfn,
485                                struct kvm_memory_slot *slot,
486                                int level)
487 {
488         unsigned long idx;
489
490         idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
491               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
492         return &slot->lpage_info[level - 2][idx].write_count;
493 }
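/*
 * Example: at level == PT_DIRECTORY_LEVEL (2MB pages, so
 * KVM_HPAGE_GFN_SHIFT(level) == 9), a slot with base_gfn 0x1234
 * queried for gfn 0x1400 gives idx = (0x1400 >> 9) - (0x1234 >> 9)
 * = 10 - 9 = 1, i.e. the second large-page entry of the slot.
 */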
494
495 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
496 {
497         struct kvm_memory_slot *slot;
498         int *write_count;
499         int i;
500
501         slot = gfn_to_memslot(kvm, gfn);
502         for (i = PT_DIRECTORY_LEVEL;
503              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
504                 write_count   = slot_largepage_idx(gfn, slot, i);
505                 *write_count += 1;
506         }
507 }
508
509 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
510 {
511         struct kvm_memory_slot *slot;
512         int *write_count;
513         int i;
514
515         slot = gfn_to_memslot(kvm, gfn);
516         for (i = PT_DIRECTORY_LEVEL;
517              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
518                 write_count   = slot_largepage_idx(gfn, slot, i);
519                 *write_count -= 1;
520                 WARN_ON(*write_count < 0);
521         }
522 }
523
524 static int has_wrprotected_page(struct kvm *kvm,
525                                 gfn_t gfn,
526                                 int level)
527 {
528         struct kvm_memory_slot *slot;
529         int *largepage_idx;
530
531         slot = gfn_to_memslot(kvm, gfn);
532         if (slot) {
533                 largepage_idx = slot_largepage_idx(gfn, slot, level);
534                 return *largepage_idx;
535         }
536
537         return 1;
538 }
539
540 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
541 {
542         unsigned long page_size;
543         int i, ret = 0;
544
545         page_size = kvm_host_page_size(kvm, gfn);
546
547         for (i = PT_PAGE_TABLE_LEVEL;
548              i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
549                 if (page_size >= KVM_HPAGE_SIZE(i))
550                         ret = i;
551                 else
552                         break;
553         }
554
555         return ret;
556 }
557
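/*
 * mapping_level() picks the largest page-table level usable for
 * @large_gfn: 4K only for slots under dirty logging, otherwise capped
 * by the host-side mapping level and kvm_x86_ops->get_lpage_level();
 * the loop below returns one level below the first level that already
 * contains a write-protected page.
 */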
558 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
559 {
560         struct kvm_memory_slot *slot;
561         int host_level, level, max_level;
562
563         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
564         if (slot && slot->dirty_bitmap)
565                 return PT_PAGE_TABLE_LEVEL;
566
567         host_level = host_mapping_level(vcpu->kvm, large_gfn);
568
569         if (host_level == PT_PAGE_TABLE_LEVEL)
570                 return host_level;
571
572         max_level = kvm_x86_ops->get_lpage_level() < host_level ?
573                 kvm_x86_ops->get_lpage_level() : host_level;
574
575         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
576                 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
577                         break;
578
579         return level - 1;
580 }
581
582 /*
583  * Take a gfn and a level and return a pointer to its reverse-map chain.
584  */
585
586 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
587 {
588         struct kvm_memory_slot *slot;
589         unsigned long idx;
590
591         slot = gfn_to_memslot(kvm, gfn);
592         if (likely(level == PT_PAGE_TABLE_LEVEL))
593                 return &slot->rmap[gfn - slot->base_gfn];
594
595         idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
596                 (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
597
598         return &slot->lpage_info[level - 2][idx].rmap_pde;
599 }
600
601 /*
602  * Reverse mapping data structures:
603  *
604  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
605  * that points to page_address(page).
606  *
607  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
608  * containing more mappings.
609  *
610  * rmap_add() returns the number of rmap entries before the spte was added,
611  * or zero if the spte was not added.
612  *
613  */
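/*
 * Worked example of the encoding (sketch): after the first spte is
 * added, *rmapp == (unsigned long)spte (bit zero clear).  After a
 * second spte, *rmapp == (unsigned long)desc | 1, with desc->sptes[0]
 * and desc->sptes[1] holding both sptes; once all RMAP_EXT (4) slots
 * of a descriptor fill up, desc->more chains in another descriptor.
 */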
614 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
615 {
616         struct kvm_mmu_page *sp;
617         struct kvm_rmap_desc *desc;
618         unsigned long *rmapp;
619         int i, count = 0;
620
621         if (!is_rmap_spte(*spte))
622                 return count;
623         sp = page_header(__pa(spte));
624         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
625         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
626         if (!*rmapp) {
627                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
628                 *rmapp = (unsigned long)spte;
629         } else if (!(*rmapp & 1)) {
630                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
631                 desc = mmu_alloc_rmap_desc(vcpu);
632                 desc->sptes[0] = (u64 *)*rmapp;
633                 desc->sptes[1] = spte;
634                 *rmapp = (unsigned long)desc | 1;
635                 ++count;
636         } else {
637                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
638                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
639                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
640                         desc = desc->more;
641                         count += RMAP_EXT;
642                 }
643                 if (desc->sptes[RMAP_EXT-1]) {
644                         desc->more = mmu_alloc_rmap_desc(vcpu);
645                         desc = desc->more;
646                 }
647                 for (i = 0; desc->sptes[i]; ++i)
648                         ++count;
649                 desc->sptes[i] = spte;
650         }
651         return count;
652 }
653
654 static void rmap_desc_remove_entry(unsigned long *rmapp,
655                                    struct kvm_rmap_desc *desc,
656                                    int i,
657                                    struct kvm_rmap_desc *prev_desc)
658 {
659         int j;
660
661         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
662                 ;
663         desc->sptes[i] = desc->sptes[j];
664         desc->sptes[j] = NULL;
665         if (j != 0)
666                 return;
667         if (!prev_desc && !desc->more)
668                 *rmapp = (unsigned long)desc->sptes[0];
669         else
670                 if (prev_desc)
671                         prev_desc->more = desc->more;
672                 else
673                         *rmapp = (unsigned long)desc->more | 1;
674         mmu_free_rmap_desc(desc);
675 }
676
677 static void rmap_remove(struct kvm *kvm, u64 *spte)
678 {
679         struct kvm_rmap_desc *desc;
680         struct kvm_rmap_desc *prev_desc;
681         struct kvm_mmu_page *sp;
682         gfn_t gfn;
683         unsigned long *rmapp;
684         int i;
685
686         sp = page_header(__pa(spte));
687         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
688         rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
689         if (!*rmapp) {
690                 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
691                 BUG();
692         } else if (!(*rmapp & 1)) {
693                 rmap_printk("rmap_remove:  %p 1->0\n", spte);
694                 if ((u64 *)*rmapp != spte) {
695                         printk(KERN_ERR "rmap_remove:  %p 1->BUG\n", spte);
696                         BUG();
697                 }
698                 *rmapp = 0;
699         } else {
700                 rmap_printk("rmap_remove:  %p many->many\n", spte);
701                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
702                 prev_desc = NULL;
703                 while (desc) {
704                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
705                                 if (desc->sptes[i] == spte) {
706                                         rmap_desc_remove_entry(rmapp,
707                                                                desc, i,
708                                                                prev_desc);
709                                         return;
710                                 }
711                         prev_desc = desc;
712                         desc = desc->more;
713                 }
714                 pr_err("rmap_remove: %p many->many\n", spte);
715                 BUG();
716         }
717 }
718
719 static void set_spte_track_bits(u64 *sptep, u64 new_spte)
720 {
721         pfn_t pfn;
722         u64 old_spte = *sptep;
723
724         if (!spte_has_volatile_bits(old_spte))
725                 __set_spte(sptep, new_spte);
726         else
727                 old_spte = __xchg_spte(sptep, new_spte);
728
729         if (!is_rmap_spte(old_spte))
730                 return;
731
732         pfn = spte_to_pfn(old_spte);
733         if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
734                 kvm_set_pfn_accessed(pfn);
735         if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
736                 kvm_set_pfn_dirty(pfn);
737 }
738
739 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
740 {
741         set_spte_track_bits(sptep, new_spte);
742         rmap_remove(kvm, sptep);
743 }
744
745 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
746 {
747         struct kvm_rmap_desc *desc;
748         u64 *prev_spte;
749         int i;
750
751         if (!*rmapp)
752                 return NULL;
753         else if (!(*rmapp & 1)) {
754                 if (!spte)
755                         return (u64 *)*rmapp;
756                 return NULL;
757         }
758         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
759         prev_spte = NULL;
760         while (desc) {
761                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
762                         if (prev_spte == spte)
763                                 return desc->sptes[i];
764                         prev_spte = desc->sptes[i];
765                 }
766                 desc = desc->more;
767         }
768         return NULL;
769 }
770
771 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
772 {
773         unsigned long *rmapp;
774         u64 *spte;
775         int i, write_protected = 0;
776
777         rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
778
779         spte = rmap_next(kvm, rmapp, NULL);
780         while (spte) {
781                 BUG_ON(!spte);
782                 BUG_ON(!(*spte & PT_PRESENT_MASK));
783                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
784                 if (is_writable_pte(*spte)) {
785                         update_spte(spte, *spte & ~PT_WRITABLE_MASK);
786                         write_protected = 1;
787                 }
788                 spte = rmap_next(kvm, rmapp, spte);
789         }
790
791         /* check for huge page mappings */
792         for (i = PT_DIRECTORY_LEVEL;
793              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
794                 rmapp = gfn_to_rmap(kvm, gfn, i);
795                 spte = rmap_next(kvm, rmapp, NULL);
796                 while (spte) {
797                         BUG_ON(!spte);
798                         BUG_ON(!(*spte & PT_PRESENT_MASK));
799                         BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
800                         pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
801                         if (is_writable_pte(*spte)) {
802                                 drop_spte(kvm, spte,
803                                           shadow_trap_nonpresent_pte);
804                                 --kvm->stat.lpages;
805                                 spte = NULL;
806                                 write_protected = 1;
807                         }
808                         spte = rmap_next(kvm, rmapp, spte);
809                 }
810         }
811
812         return write_protected;
813 }
814
815 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
816                            unsigned long data)
817 {
818         u64 *spte;
819         int need_tlb_flush = 0;
820
821         while ((spte = rmap_next(kvm, rmapp, NULL))) {
822                 BUG_ON(!(*spte & PT_PRESENT_MASK));
823                 rmap_printk("kvm_unmap_rmapp: spte %p %llx\n", spte, *spte);
824                 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
825                 need_tlb_flush = 1;
826         }
827         return need_tlb_flush;
828 }
829
830 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
831                              unsigned long data)
832 {
833         int need_flush = 0;
834         u64 *spte, new_spte;
835         pte_t *ptep = (pte_t *)data;
836         pfn_t new_pfn;
837
838         WARN_ON(pte_huge(*ptep));
839         new_pfn = pte_pfn(*ptep);
840         spte = rmap_next(kvm, rmapp, NULL);
841         while (spte) {
842                 BUG_ON(!is_shadow_present_pte(*spte));
843                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
844                 need_flush = 1;
845                 if (pte_write(*ptep)) {
846                         drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
847                         spte = rmap_next(kvm, rmapp, NULL);
848                 } else {
849                         new_spte = *spte & ~PT64_BASE_ADDR_MASK;
850                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
851
852                         new_spte &= ~PT_WRITABLE_MASK;
853                         new_spte &= ~SPTE_HOST_WRITEABLE;
854                         new_spte &= ~shadow_accessed_mask;
855                         set_spte_track_bits(spte, new_spte);
856                         spte = rmap_next(kvm, rmapp, spte);
857                 }
858         }
859         if (need_flush)
860                 kvm_flush_remote_tlbs(kvm);
861
862         return 0;
863 }
864
865 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
866                           unsigned long data,
867                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
868                                          unsigned long data))
869 {
870         int i, j;
871         int ret;
872         int retval = 0;
873         struct kvm_memslots *slots;
874
875         slots = kvm_memslots(kvm);
876
877         for (i = 0; i < slots->nmemslots; i++) {
878                 struct kvm_memory_slot *memslot = &slots->memslots[i];
879                 unsigned long start = memslot->userspace_addr;
880                 unsigned long end;
881
882                 end = start + (memslot->npages << PAGE_SHIFT);
883                 if (hva >= start && hva < end) {
884                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
885
886                         ret = handler(kvm, &memslot->rmap[gfn_offset], data);
887
888                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
889                                 unsigned long idx;
890                                 int sh;
891
892                                 sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
893                                 idx = ((memslot->base_gfn+gfn_offset) >> sh) -
894                                         (memslot->base_gfn >> sh);
895                                 ret |= handler(kvm,
896                                         &memslot->lpage_info[j][idx].rmap_pde,
897                                         data);
898                         }
899                         trace_kvm_age_page(hva, memslot, ret);
900                         retval |= ret;
901                 }
902         }
903
904         return retval;
905 }
906
907 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
908 {
909         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
910 }
911
912 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
913 {
914         kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
915 }
916
917 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
918                          unsigned long data)
919 {
920         u64 *spte;
921         int young = 0;
922
923         /*
924          * Emulate the accessed bit for EPT, by checking if this page has
925          * an EPT mapping, and clearing it if it does. On the next access,
926          * a new EPT mapping will be established.
927          * This has some overhead, but not as much as the cost of swapping
928          * out actively used pages or breaking up actively used hugepages.
929          */
930         if (!shadow_accessed_mask)
931                 return kvm_unmap_rmapp(kvm, rmapp, data);
932
933         spte = rmap_next(kvm, rmapp, NULL);
934         while (spte) {
935                 int _young;
936                 u64 _spte = *spte;
937                 BUG_ON(!(_spte & PT_PRESENT_MASK));
938                 _young = _spte & PT_ACCESSED_MASK;
939                 if (_young) {
940                         young = 1;
941                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
942                 }
943                 spte = rmap_next(kvm, rmapp, spte);
944         }
945         return young;
946 }
947
948 #define RMAP_RECYCLE_THRESHOLD 1000
949
950 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
951 {
952         unsigned long *rmapp;
953         struct kvm_mmu_page *sp;
954
955         sp = page_header(__pa(spte));
956
957         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
958
959         kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
960         kvm_flush_remote_tlbs(vcpu->kvm);
961 }
962
963 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
964 {
965         return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
966 }
967
968 #ifdef MMU_DEBUG
969 static int is_empty_shadow_page(u64 *spt)
970 {
971         u64 *pos;
972         u64 *end;
973
974         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
975                 if (is_shadow_present_pte(*pos)) {
976                         printk(KERN_ERR "%s: %p %llx\n", __func__,
977                                pos, *pos);
978                         return 0;
979                 }
980         return 1;
981 }
982 #endif
983
984 /*
985  * This value is the sum of all of the kvm instances'
986  * kvm->arch.n_used_mmu_pages values.  We need a global,
987  * aggregate version in order to make the slab shrinker
988  * faster.
989  */
990 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
991 {
992         kvm->arch.n_used_mmu_pages += nr;
993         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
994 }
995
996 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
997 {
998         ASSERT(is_empty_shadow_page(sp->spt));
999         hlist_del(&sp->hash_link);
1000         list_del(&sp->link);
1001         __free_page(virt_to_page(sp->spt));
1002         if (!sp->role.direct)
1003                 __free_page(virt_to_page(sp->gfns));
1004         kmem_cache_free(mmu_page_header_cache, sp);
1005         kvm_mod_used_mmu_pages(kvm, -1);
1006 }
1007
1008 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1009 {
1010         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
1011 }
1012
1013 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1014                                                u64 *parent_pte, int direct)
1015 {
1016         struct kvm_mmu_page *sp;
1017
1018         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
1019         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
1020         if (!direct)
1021                 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
1022                                                   PAGE_SIZE);
1023         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1024         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1025         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
1026         sp->multimapped = 0;
1027         sp->parent_pte = parent_pte;
1028         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1029         return sp;
1030 }
1031
1032 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1033                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1034 {
1035         struct kvm_pte_chain *pte_chain;
1036         struct hlist_node *node;
1037         int i;
1038
1039         if (!parent_pte)
1040                 return;
1041         if (!sp->multimapped) {
1042                 u64 *old = sp->parent_pte;
1043
1044                 if (!old) {
1045                         sp->parent_pte = parent_pte;
1046                         return;
1047                 }
1048                 sp->multimapped = 1;
1049                 pte_chain = mmu_alloc_pte_chain(vcpu);
1050                 INIT_HLIST_HEAD(&sp->parent_ptes);
1051                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1052                 pte_chain->parent_ptes[0] = old;
1053         }
1054         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
1055                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
1056                         continue;
1057                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
1058                         if (!pte_chain->parent_ptes[i]) {
1059                                 pte_chain->parent_ptes[i] = parent_pte;
1060                                 return;
1061                         }
1062         }
1063         pte_chain = mmu_alloc_pte_chain(vcpu);
1064         BUG_ON(!pte_chain);
1065         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1066         pte_chain->parent_ptes[0] = parent_pte;
1067 }
1068
1069 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1070                                        u64 *parent_pte)
1071 {
1072         struct kvm_pte_chain *pte_chain;
1073         struct hlist_node *node;
1074         int i;
1075
1076         if (!sp->multimapped) {
1077                 BUG_ON(sp->parent_pte != parent_pte);
1078                 sp->parent_pte = NULL;
1079                 return;
1080         }
1081         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1082                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1083                         if (!pte_chain->parent_ptes[i])
1084                                 break;
1085                         if (pte_chain->parent_ptes[i] != parent_pte)
1086                                 continue;
1087                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
1088                                 && pte_chain->parent_ptes[i + 1]) {
1089                                 pte_chain->parent_ptes[i]
1090                                         = pte_chain->parent_ptes[i + 1];
1091                                 ++i;
1092                         }
1093                         pte_chain->parent_ptes[i] = NULL;
1094                         if (i == 0) {
1095                                 hlist_del(&pte_chain->link);
1096                                 mmu_free_pte_chain(pte_chain);
1097                                 if (hlist_empty(&sp->parent_ptes)) {
1098                                         sp->multimapped = 0;
1099                                         sp->parent_pte = NULL;
1100                                 }
1101                         }
1102                         return;
1103                 }
1104         BUG();
1105 }
1106
1107 static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
1108 {
1109         struct kvm_pte_chain *pte_chain;
1110         struct hlist_node *node;
1111         struct kvm_mmu_page *parent_sp;
1112         int i;
1113
1114         if (!sp->multimapped && sp->parent_pte) {
1115                 parent_sp = page_header(__pa(sp->parent_pte));
1116                 fn(parent_sp, sp->parent_pte);
1117                 return;
1118         }
1119
1120         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1121                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1122                         u64 *spte = pte_chain->parent_ptes[i];
1123
1124                         if (!spte)
1125                                 break;
1126                         parent_sp = page_header(__pa(spte));
1127                         fn(parent_sp, spte);
1128                 }
1129 }
1130
1131 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
1132 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1133 {
1134         mmu_parent_walk(sp, mark_unsync);
1135 }
1136
1137 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
1138 {
1139         unsigned int index;
1140
1141         index = spte - sp->spt;
1142         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1143                 return;
1144         if (sp->unsync_children++)
1145                 return;
1146         kvm_mmu_mark_parents_unsync(sp);
1147 }
1148
1149 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1150                                     struct kvm_mmu_page *sp)
1151 {
1152         int i;
1153
1154         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1155                 sp->spt[i] = shadow_trap_nonpresent_pte;
1156 }
1157
1158 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1159                                struct kvm_mmu_page *sp, bool clear_unsync)
1160 {
1161         return 1;
1162 }
1163
1164 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1165 {
1166 }
1167
1168 #define KVM_PAGE_ARRAY_NR 16
1169
1170 struct kvm_mmu_pages {
1171         struct mmu_page_and_offset {
1172                 struct kvm_mmu_page *sp;
1173                 unsigned int idx;
1174         } page[KVM_PAGE_ARRAY_NR];
1175         unsigned int nr;
1176 };
1177
1178 #define for_each_unsync_children(bitmap, idx)           \
1179         for (idx = find_first_bit(bitmap, 512);         \
1180              idx < 512;                                 \
1181              idx = find_next_bit(bitmap, 512, idx+1))
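/* 512 here is PT64_ENT_PER_PAGE: one bitmap bit per spte in the page. */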
1182
1183 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1184                          int idx)
1185 {
1186         int i;
1187
1188         if (sp->unsync)
1189                 for (i = 0; i < pvec->nr; i++)
1190                         if (pvec->page[i].sp == sp)
1191                                 return 0;
1192
1193         pvec->page[pvec->nr].sp = sp;
1194         pvec->page[pvec->nr].idx = idx;
1195         pvec->nr++;
1196         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1197 }
1198
1199 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1200                            struct kvm_mmu_pages *pvec)
1201 {
1202         int i, ret, nr_unsync_leaf = 0;
1203
1204         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1205                 struct kvm_mmu_page *child;
1206                 u64 ent = sp->spt[i];
1207
1208                 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1209                         goto clear_child_bitmap;
1210
1211                 child = page_header(ent & PT64_BASE_ADDR_MASK);
1212
1213                 if (child->unsync_children) {
1214                         if (mmu_pages_add(pvec, child, i))
1215                                 return -ENOSPC;
1216
1217                         ret = __mmu_unsync_walk(child, pvec);
1218                         if (!ret)
1219                                 goto clear_child_bitmap;
1220                         else if (ret > 0)
1221                                 nr_unsync_leaf += ret;
1222                         else
1223                                 return ret;
1224                 } else if (child->unsync) {
1225                         nr_unsync_leaf++;
1226                         if (mmu_pages_add(pvec, child, i))
1227                                 return -ENOSPC;
1228                 } else
1229                         goto clear_child_bitmap;
1230
1231                 continue;
1232
1233 clear_child_bitmap:
1234                 __clear_bit(i, sp->unsync_child_bitmap);
1235                 sp->unsync_children--;
1236                 WARN_ON((int)sp->unsync_children < 0);
1237         }
1238
1239
1240         return nr_unsync_leaf;
1241 }
1242
1243 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1244                            struct kvm_mmu_pages *pvec)
1245 {
1246         if (!sp->unsync_children)
1247                 return 0;
1248
1249         mmu_pages_add(pvec, sp, 0);
1250         return __mmu_unsync_walk(sp, pvec);
1251 }
1252
1253 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1254 {
1255         WARN_ON(!sp->unsync);
1256         trace_kvm_mmu_sync_page(sp);
1257         sp->unsync = 0;
1258         --kvm->stat.mmu_unsync;
1259 }
1260
1261 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1262                                     struct list_head *invalid_list);
1263 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1264                                     struct list_head *invalid_list);
1265
1266 #define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
1267   hlist_for_each_entry(sp, pos,                                         \
1268    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1269         if ((sp)->gfn != (gfn)) {} else
1270
1271 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
1272   hlist_for_each_entry(sp, pos,                                         \
1273    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
1274                 if ((sp)->gfn != (gfn) || (sp)->role.direct ||          \
1275                         (sp)->role.invalid) {} else
1276
1277 /* @sp->gfn should be write-protected at the call site */
1278 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1279                            struct list_head *invalid_list, bool clear_unsync)
1280 {
1281         if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1282                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1283                 return 1;
1284         }
1285
1286         if (clear_unsync)
1287                 kvm_unlink_unsync_page(vcpu->kvm, sp);
1288
1289         if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
1290                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1291                 return 1;
1292         }
1293
1294         kvm_mmu_flush_tlb(vcpu);
1295         return 0;
1296 }
1297
1298 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1299                                    struct kvm_mmu_page *sp)
1300 {
1301         LIST_HEAD(invalid_list);
1302         int ret;
1303
1304         ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1305         if (ret)
1306                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1307
1308         return ret;
1309 }
1310
1311 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1312                          struct list_head *invalid_list)
1313 {
1314         return __kvm_sync_page(vcpu, sp, invalid_list, true);
1315 }
1316
1317 /* @gfn should be write-protected at the call site */
1318 static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
1319 {
1320         struct kvm_mmu_page *s;
1321         struct hlist_node *node;
1322         LIST_HEAD(invalid_list);
1323         bool flush = false;
1324
1325         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1326                 if (!s->unsync)
1327                         continue;
1328
1329                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1330                 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1331                         (vcpu->arch.mmu.sync_page(vcpu, s, true))) {
1332                         kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1333                         continue;
1334                 }
1335                 kvm_unlink_unsync_page(vcpu->kvm, s);
1336                 flush = true;
1337         }
1338
1339         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1340         if (flush)
1341                 kvm_mmu_flush_tlb(vcpu);
1342 }
1343
1344 struct mmu_page_path {
1345         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1346         unsigned int idx[PT64_ROOT_LEVEL-1];
1347 };
1348
1349 #define for_each_sp(pvec, sp, parents, i)                       \
1350                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1351                         sp = pvec.page[i].sp;                   \
1352                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1353                         i = mmu_pages_next(&pvec, &parents, i))
1354
1355 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1356                           struct mmu_page_path *parents,
1357                           int i)
1358 {
1359         int n;
1360
1361         for (n = i+1; n < pvec->nr; n++) {
1362                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1363
1364                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1365                         parents->idx[0] = pvec->page[n].idx;
1366                         return n;
1367                 }
1368
1369                 parents->parent[sp->role.level-2] = sp;
1370                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1371         }
1372
1373         return n;
1374 }
1375
1376 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1377 {
1378         struct kvm_mmu_page *sp;
1379         unsigned int level = 0;
1380
1381         do {
1382                 unsigned int idx = parents->idx[level];
1383
1384                 sp = parents->parent[level];
1385                 if (!sp)
1386                         return;
1387
1388                 --sp->unsync_children;
1389                 WARN_ON((int)sp->unsync_children < 0);
1390                 __clear_bit(idx, sp->unsync_child_bitmap);
1391                 level++;
1392         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1393 }
1394
1395 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1396                                struct mmu_page_path *parents,
1397                                struct kvm_mmu_pages *pvec)
1398 {
1399         parents->parent[parent->role.level-1] = NULL;
1400         pvec->nr = 0;
1401 }
1402
1403 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1404                               struct kvm_mmu_page *parent)
1405 {
1406         int i;
1407         struct kvm_mmu_page *sp;
1408         struct mmu_page_path parents;
1409         struct kvm_mmu_pages pages;
1410         LIST_HEAD(invalid_list);
1411
1412         kvm_mmu_pages_init(parent, &parents, &pages);
1413         while (mmu_unsync_walk(parent, &pages)) {
1414                 int protected = 0;
1415
1416                 for_each_sp(pages, sp, parents, i)
1417                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1418
1419                 if (protected)
1420                         kvm_flush_remote_tlbs(vcpu->kvm);
1421
1422                 for_each_sp(pages, sp, parents, i) {
1423                         kvm_sync_page(vcpu, sp, &invalid_list);
1424                         mmu_pages_clear_parents(&parents);
1425                 }
1426                 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1427                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1428                 kvm_mmu_pages_init(parent, &parents, &pages);
1429         }
1430 }
1431
1432 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1433                                              gfn_t gfn,
1434                                              gva_t gaddr,
1435                                              unsigned level,
1436                                              int direct,
1437                                              unsigned access,
1438                                              u64 *parent_pte)
1439 {
1440         union kvm_mmu_page_role role;
1441         unsigned quadrant;
1442         struct kvm_mmu_page *sp;
1443         struct hlist_node *node;
1444         bool need_sync = false;
1445
1446         role = vcpu->arch.mmu.base_role;
1447         role.level = level;
1448         role.direct = direct;
1449         if (role.direct)
1450                 role.cr4_pae = 0;
1451         role.access = access;
1452         if (!vcpu->arch.mmu.direct_map
1453             && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1454                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1455                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1456                 role.quadrant = quadrant;
1457         }
1458         for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
1459                 if (!need_sync && sp->unsync)
1460                         need_sync = true;
1461
1462                 if (sp->role.word != role.word)
1463                         continue;
1464
1465                 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
1466                         break;
1467
1468                 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1469                 if (sp->unsync_children) {
1470                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1471                         kvm_mmu_mark_parents_unsync(sp);
1472                 } else if (sp->unsync)
1473                         kvm_mmu_mark_parents_unsync(sp);
1474
1475                 trace_kvm_mmu_get_page(sp, false);
1476                 return sp;
1477         }
1478         ++vcpu->kvm->stat.mmu_cache_miss;
1479         sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
1480         if (!sp)
1481                 return sp;
1482         sp->gfn = gfn;
1483         sp->role = role;
1484         hlist_add_head(&sp->hash_link,
1485                 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
1486         if (!direct) {
1487                 if (rmap_write_protect(vcpu->kvm, gfn))
1488                         kvm_flush_remote_tlbs(vcpu->kvm);
1489                 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
1490                         kvm_sync_pages(vcpu, gfn);
1491
1492                 account_shadowed(vcpu->kvm, gfn);
1493         }
1494         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1495                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1496         else
1497                 nonpaging_prefetch_page(vcpu, sp);
1498         trace_kvm_mmu_get_page(sp, true);
1499         return sp;
1500 }
1501
1502 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1503                              struct kvm_vcpu *vcpu, u64 addr)
1504 {
1505         iterator->addr = addr;
1506         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1507         iterator->level = vcpu->arch.mmu.shadow_root_level;
1508
1509         if (iterator->level == PT64_ROOT_LEVEL &&
1510             vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
1511             !vcpu->arch.mmu.direct_map)
1512                 --iterator->level;
1513
1514         if (iterator->level == PT32E_ROOT_LEVEL) {
1515                 iterator->shadow_addr
1516                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1517                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1518                 --iterator->level;
1519                 if (!iterator->shadow_addr)
1520                         iterator->level = 0;
1521         }
1522 }
1523
1524 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1525 {
1526         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1527                 return false;
1528
1529         if (iterator->level == PT_PAGE_TABLE_LEVEL)
1530                 if (is_large_pte(*iterator->sptep))
1531                         return false;
1532
1533         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1534         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1535         return true;
1536 }
1537
1538 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1539 {
1540         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1541         --iterator->level;
1542 }
1543
1544 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
1545 {
1546         u64 spte;
1547
1548         spte = __pa(sp->spt)
1549                 | PT_PRESENT_MASK | PT_ACCESSED_MASK
1550                 | PT_WRITABLE_MASK | PT_USER_MASK;
1551         __set_spte(sptep, spte);
1552 }
1553
1554 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1555 {
1556         if (is_large_pte(*sptep)) {
1557                 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1558                 kvm_flush_remote_tlbs(vcpu->kvm);
1559         }
1560 }
1561
1562 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1563                                    unsigned direct_access)
1564 {
1565         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1566                 struct kvm_mmu_page *child;
1567
1568                 /*
1569                  * For the direct sp, if the guest pte's dirty bit
1570                  * changed from clean to dirty, it will corrupt the
1571                  * sp's access: it would allow writes through a
1572                  * read-only sp, so we should update the spte at this
1573                  * point to get a new sp with the correct access.
1574                  */
1575                 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1576                 if (child->role.access == direct_access)
1577                         return;
1578
1579                 mmu_page_remove_parent_pte(child, sptep);
1580                 __set_spte(sptep, shadow_trap_nonpresent_pte);
1581                 kvm_flush_remote_tlbs(vcpu->kvm);
1582         }
1583 }
1584
1585 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1586                                          struct kvm_mmu_page *sp)
1587 {
1588         unsigned i;
1589         u64 *pt;
1590         u64 ent;
1591
1592         pt = sp->spt;
1593
1594         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1595                 ent = pt[i];
1596
1597                 if (is_shadow_present_pte(ent)) {
1598                         if (!is_last_spte(ent, sp->role.level)) {
1599                                 ent &= PT64_BASE_ADDR_MASK;
1600                                 mmu_page_remove_parent_pte(page_header(ent),
1601                                                            &pt[i]);
1602                         } else {
1603                                 if (is_large_pte(ent))
1604                                         --kvm->stat.lpages;
1605                                 drop_spte(kvm, &pt[i],
1606                                           shadow_trap_nonpresent_pte);
1607                         }
1608                 }
1609                 pt[i] = shadow_trap_nonpresent_pte;
1610         }
1611 }
1612
1613 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1614 {
1615         mmu_page_remove_parent_pte(sp, parent_pte);
1616 }
1617
1618 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1619 {
1620         int i;
1621         struct kvm_vcpu *vcpu;
1622
1623         kvm_for_each_vcpu(i, vcpu, kvm)
1624                 vcpu->arch.last_pte_updated = NULL;
1625 }
1626
1627 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1628 {
1629         u64 *parent_pte;
1630
1631         while (sp->multimapped || sp->parent_pte) {
1632                 if (!sp->multimapped)
1633                         parent_pte = sp->parent_pte;
1634                 else {
1635                         struct kvm_pte_chain *chain;
1636
1637                         chain = container_of(sp->parent_ptes.first,
1638                                              struct kvm_pte_chain, link);
1639                         parent_pte = chain->parent_ptes[0];
1640                 }
1641                 BUG_ON(!parent_pte);
1642                 kvm_mmu_put_page(sp, parent_pte);
1643                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1644         }
1645 }
1646
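/*
 * Zap every unsync child reachable from @parent, returning the number
 * of pages zapped.  The walk is re-initialised after each batch since
 * zapping changes the set of pages being iterated.
 */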
1647 static int mmu_zap_unsync_children(struct kvm *kvm,
1648                                    struct kvm_mmu_page *parent,
1649                                    struct list_head *invalid_list)
1650 {
1651         int i, zapped = 0;
1652         struct mmu_page_path parents;
1653         struct kvm_mmu_pages pages;
1654
1655         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1656                 return 0;
1657
1658         kvm_mmu_pages_init(parent, &parents, &pages);
1659         while (mmu_unsync_walk(parent, &pages)) {
1660                 struct kvm_mmu_page *sp;
1661
1662                 for_each_sp(pages, sp, parents, i) {
1663                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1664                         mmu_pages_clear_parents(&parents);
1665                         zapped++;
1666                 }
1667                 kvm_mmu_pages_init(parent, &parents, &pages);
1668         }
1669
1670         return zapped;
1671 }
1672
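/*
 * Zapping is done in two phases: kvm_mmu_prepare_zap_page() unlinks a
 * page and queues it on @invalid_list, kvm_mmu_commit_zap_page()
 * performs a single remote TLB flush and then frees everything on the
 * list, batching the expensive flush across many pages.
 */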
1673 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1674                                     struct list_head *invalid_list)
1675 {
1676         int ret;
1677
1678         trace_kvm_mmu_prepare_zap_page(sp);
1679         ++kvm->stat.mmu_shadow_zapped;
1680         ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1681         kvm_mmu_page_unlink_children(kvm, sp);
1682         kvm_mmu_unlink_parents(kvm, sp);
1683         if (!sp->role.invalid && !sp->role.direct)
1684                 unaccount_shadowed(kvm, sp->gfn);
1685         if (sp->unsync)
1686                 kvm_unlink_unsync_page(kvm, sp);
1687         if (!sp->root_count) {
1688                 /* Count self */
1689                 ret++;
1690                 list_move(&sp->link, invalid_list);
1691         } else {
1692                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1693                 kvm_reload_remote_mmus(kvm);
1694         }
1695
1696         sp->role.invalid = 1;
1697         kvm_mmu_reset_last_pte_updated(kvm);
1698         return ret;
1699 }
1700
1701 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1702                                     struct list_head *invalid_list)
1703 {
1704         struct kvm_mmu_page *sp;
1705
1706         if (list_empty(invalid_list))
1707                 return;
1708
1709         kvm_flush_remote_tlbs(kvm);
1710
1711         do {
1712                 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1713                 WARN_ON(!sp->role.invalid || sp->root_count);
1714                 kvm_mmu_free_page(kvm, sp);
1715         } while (!list_empty(invalid_list));
1716
1717 }
1718
1719 /*
1720  * Change the number of mmu pages allocated to the vm.
1721  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
1722  */
1723 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1724 {
1725         LIST_HEAD(invalid_list);
1726         /*
1727          * If we set the number of mmu pages to be smaller than the
1728          * number of active pages, we must free some mmu pages before
1729          * we change the value.
1730          */
1731
1732         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1733                 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1734                         !list_empty(&kvm->arch.active_mmu_pages)) {
1735                         struct kvm_mmu_page *page;
1736
1737                         page = container_of(kvm->arch.active_mmu_pages.prev,
1738                                             struct kvm_mmu_page, link);
1739                         kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1740                         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1741                 }
1742                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1743         }
1744
1745         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
1746 }
1747
1748 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1749 {
1750         struct kvm_mmu_page *sp;
1751         struct hlist_node *node;
1752         LIST_HEAD(invalid_list);
1753         int r;
1754
1755         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1756         r = 0;
1757
1758         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1759                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1760                          sp->role.word);
1761                 r = 1;
1762                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1763         }
1764         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1765         return r;
1766 }
1767
1768 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1769 {
1770         struct kvm_mmu_page *sp;
1771         struct hlist_node *node;
1772         LIST_HEAD(invalid_list);
1773
1774         for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1775                 pgprintk("%s: zap %llx %x\n",
1776                          __func__, gfn, sp->role.word);
1777                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1778         }
1779         kvm_mmu_commit_zap_page(kvm, &invalid_list);
1780 }
1781
1782 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1783 {
1784         int slot = memslot_id(kvm, gfn);
1785         struct kvm_mmu_page *sp = page_header(__pa(pte));
1786
1787         __set_bit(slot, sp->slot_bitmap);
1788 }
1789
1790 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1791 {
1792         int i;
1793         u64 *pt = sp->spt;
1794
1795         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1796                 return;
1797
1798         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1799                 if (pt[i] == shadow_notrap_nonpresent_pte)
1800                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1801         }
1802 }
1803
1804 /*
1805  * The function is based on mtrr_type_lookup() in
1806  * arch/x86/kernel/cpu/mtrr/generic.c
1807  */
1808 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1809                          u64 start, u64 end)
1810 {
1811         int i;
1812         u64 base, mask;
1813         u8 prev_match, curr_match;
1814         int num_var_ranges = KVM_NR_VAR_MTRR;
1815
1816         if (!mtrr_state->enabled)
1817                 return 0xFF;
1818
1819         /* Make end inclusive instead of exclusive */
1820         end--;
1821
1822         /* Look in fixed ranges.  Just return the type as per the start address */
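        /*
         * Fixed-range layout (worked example): the 64K ranges cover
         * 0x00000-0x7FFFF (idx 0-7), the 16K ranges 0x80000-0xBFFFF
         * (idx 8-23), and the 4K ranges 0xC0000-0xFFFFF (idx 24-87).
         * E.g. start == 0xA0000 gives idx = 8 + (0x20000 >> 14) = 16.
         */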
1823         if (mtrr_state->have_fixed && (start < 0x100000)) {
1824                 int idx;
1825
1826                 if (start < 0x80000) {
1827                         idx = 0;
1828                         idx += (start >> 16);
1829                         return mtrr_state->fixed_ranges[idx];
1830                 } else if (start < 0xC0000) {
1831                         idx = 1 * 8;
1832                         idx += ((start - 0x80000) >> 14);
1833                         return mtrr_state->fixed_ranges[idx];
1834                 } else if (start < 0x1000000) {
1835                         idx = 3 * 8;
1836                         idx += ((start - 0xC0000) >> 12);
1837                         return mtrr_state->fixed_ranges[idx];
1838                 }
1839         }
1840
1841         /*
1842          * Look in variable ranges.
1843          * Look for multiple ranges matching this address and pick the
1844          * type as per MTRR precedence.
1845          */
1846         if (!(mtrr_state->enabled & 2))
1847                 return mtrr_state->def_type;
1848
1849         prev_match = 0xFF;
1850         for (i = 0; i < num_var_ranges; ++i) {
1851                 unsigned short start_state, end_state;
1852
1853                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1854                         continue;
1855
1856                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1857                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1858                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1859                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1860
1861                 start_state = ((start & mask) == (base & mask));
1862                 end_state = ((end & mask) == (base & mask));
1863                 if (start_state != end_state)
1864                         return 0xFE;
1865
1866                 if ((start & mask) != (base & mask))
1867                         continue;
1868
1869                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1870                 if (prev_match == 0xFF) {
1871                         prev_match = curr_match;
1872                         continue;
1873                 }
1874
1875                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1876                     curr_match == MTRR_TYPE_UNCACHABLE)
1877                         return MTRR_TYPE_UNCACHABLE;
1878
1879                 if ((prev_match == MTRR_TYPE_WRBACK &&
1880                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1881                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1882                      curr_match == MTRR_TYPE_WRBACK)) {
1883                         prev_match = MTRR_TYPE_WRTHROUGH;
1884                         curr_match = MTRR_TYPE_WRTHROUGH;
1885                 }
1886
1887                 if (prev_match != curr_match)
1888                         return MTRR_TYPE_UNCACHABLE;
1889         }
1890
1891         if (prev_match != 0xFF)
1892                 return prev_match;
1893
1894         return mtrr_state->def_type;
1895 }
1896
1897 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1898 {
1899         u8 mtrr;
1900
1901         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1902                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1903         if (mtrr == 0xfe || mtrr == 0xff)
1904                 mtrr = MTRR_TYPE_WRBACK;
1905         return mtrr;
1906 }
1907 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1908
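/*
 * Unsync pages: instead of write-protecting a guest page table, let
 * the guest write to it freely and resynchronise the shadow page
 * later (see kvm_sync_pages()/mmu_sync_roots()).  mmu_convert_notrap()
 * turns "notrap" nonpresent sptes back into trapping ones, since the
 * guest ptes they mirrored may now change behind our back.
 */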
1909 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1910 {
1911         trace_kvm_mmu_unsync_page(sp);
1912         ++vcpu->kvm->stat.mmu_unsync;
1913         sp->unsync = 1;
1914
1915         kvm_mmu_mark_parents_unsync(sp);
1916         mmu_convert_notrap(sp);
1917 }
1918
1919 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1920 {
1921         struct kvm_mmu_page *s;
1922         struct hlist_node *node;
1923
1924         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1925                 if (s->unsync)
1926                         continue;
1927                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1928                 __kvm_unsync_page(vcpu, s);
1929         }
1930 }
1931
1932 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1933                                   bool can_unsync)
1934 {
1935         struct kvm_mmu_page *s;
1936         struct hlist_node *node;
1937         bool need_unsync = false;
1938
1939         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1940                 if (!can_unsync)
1941                         return 1;
1942
1943                 if (s->role.level != PT_PAGE_TABLE_LEVEL)
1944                         return 1;
1945
1946                 if (!need_unsync && !s->unsync) {
1947                         if (!oos_shadow)
1948                                 return 1;
1949                         need_unsync = true;
1950                 }
1951         }
1952         if (need_unsync)
1953                 kvm_unsync_pages(vcpu, gfn);
1954         return 0;
1955 }
1956
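/*
 * Build and install a shadow pte for @gfn.  Returns 1 when the
 * mapping could not be made writable (a huge spte overlapping a
 * write-protected gfn, or a gfn that has to stay write-protected);
 * the caller propagates this via *ptwrite so the faulting write can
 * be emulated instead.
 */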
1957 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1958                     unsigned pte_access, int user_fault,
1959                     int write_fault, int dirty, int level,
1960                     gfn_t gfn, pfn_t pfn, bool speculative,
1961                     bool can_unsync, bool reset_host_protection)
1962 {
1963         u64 spte;
1964         int ret = 0;
1965
1966         /*
1967          * We don't set the accessed bit, since we sometimes want to see
1968          * whether the guest actually used the pte (in order to detect
1969          * demand paging).
1970          */
1971         spte = shadow_base_present_pte;
1972         if (!speculative)
1973                 spte |= shadow_accessed_mask;
1974         if (!dirty)
1975                 pte_access &= ~ACC_WRITE_MASK;
1976         if (pte_access & ACC_EXEC_MASK)
1977                 spte |= shadow_x_mask;
1978         else
1979                 spte |= shadow_nx_mask;
1980         if (pte_access & ACC_USER_MASK)
1981                 spte |= shadow_user_mask;
1982         if (level > PT_PAGE_TABLE_LEVEL)
1983                 spte |= PT_PAGE_SIZE_MASK;
1984         if (tdp_enabled)
1985                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1986                         kvm_is_mmio_pfn(pfn));
1987
1988         if (reset_host_protection)
1989                 spte |= SPTE_HOST_WRITEABLE;
1990
1991         spte |= (u64)pfn << PAGE_SHIFT;
1992
1993         if ((pte_access & ACC_WRITE_MASK)
1994             || (!vcpu->arch.mmu.direct_map && write_fault
1995                 && !is_write_protection(vcpu) && !user_fault)) {
1996
1997                 if (level > PT_PAGE_TABLE_LEVEL &&
1998                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
1999                         ret = 1;
2000                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2001                         goto done;
2002                 }
2003
2004                 spte |= PT_WRITABLE_MASK;
2005
2006                 if (!vcpu->arch.mmu.direct_map
2007                     && !(pte_access & ACC_WRITE_MASK))
2008                         spte &= ~PT_USER_MASK;
2009
2010                 /*
2011                  * Optimization: for pte sync, if spte was writable the hash
2012                  * lookup is unnecessary (and expensive). Write protection
2013                  * is the responsibility of mmu_get_page / kvm_sync_page.
2014                  * The same reasoning applies to dirty page accounting.
2015                  */
2016                 if (!can_unsync && is_writable_pte(*sptep))
2017                         goto set_pte;
2018
2019                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2020                         pgprintk("%s: found shadow page for %llx, marking ro\n",
2021                                  __func__, gfn);
2022                         ret = 1;
2023                         pte_access &= ~ACC_WRITE_MASK;
2024                         if (is_writable_pte(spte))
2025                                 spte &= ~PT_WRITABLE_MASK;
2026                 }
2027         }
2028
2029         if (pte_access & ACC_WRITE_MASK)
2030                 mark_page_dirty(vcpu->kvm, gfn);
2031
2032 set_pte:
2033         update_spte(sptep, spte);
2034 done:
2035         return ret;
2036 }
2037
2038 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2039                          unsigned pt_access, unsigned pte_access,
2040                          int user_fault, int write_fault, int dirty,
2041                          int *ptwrite, int level, gfn_t gfn,
2042                          pfn_t pfn, bool speculative,
2043                          bool reset_host_protection)
2044 {
2045         int was_rmapped = 0;
2046         int rmap_count;
2047
2048         pgprintk("%s: spte %llx access %x write_fault %d"
2049                  " user_fault %d gfn %llx\n",
2050                  __func__, *sptep, pt_access,
2051                  write_fault, user_fault, gfn);
2052
2053         if (is_rmap_spte(*sptep)) {
2054                 /*
2055                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2056                  * the parent of the now unreachable PTE.
2057                  */
2058                 if (level > PT_PAGE_TABLE_LEVEL &&
2059                     !is_large_pte(*sptep)) {
2060                         struct kvm_mmu_page *child;
2061                         u64 pte = *sptep;
2062
2063                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2064                         mmu_page_remove_parent_pte(child, sptep);
2065                         __set_spte(sptep, shadow_trap_nonpresent_pte);
2066                         kvm_flush_remote_tlbs(vcpu->kvm);
2067                 } else if (pfn != spte_to_pfn(*sptep)) {
2068                         pgprintk("hfn old %llx new %llx\n",
2069                                  spte_to_pfn(*sptep), pfn);
2070                         drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2071                         kvm_flush_remote_tlbs(vcpu->kvm);
2072                 } else
2073                         was_rmapped = 1;
2074         }
2075
2076         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
2077                       dirty, level, gfn, pfn, speculative, true,
2078                       reset_host_protection)) {
2079                 if (write_fault)
2080                         *ptwrite = 1;
2081                 kvm_mmu_flush_tlb(vcpu);
2082         }
2083
2084         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2085         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2086                  is_large_pte(*sptep) ? "2MB" : "4kB",
2087                  *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
2088                  *sptep, sptep);
2089         if (!was_rmapped && is_large_pte(*sptep))
2090                 ++vcpu->kvm->stat.lpages;
2091
2092         page_header_update_slot(vcpu->kvm, sptep, gfn);
2093         if (!was_rmapped) {
2094                 rmap_count = rmap_add(vcpu, sptep, gfn);
2095                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2096                         rmap_recycle(vcpu, sptep, gfn);
2097         }
2098         kvm_release_pfn_clean(pfn);
2099         if (speculative) {
2100                 vcpu->arch.last_pte_updated = sptep;
2101                 vcpu->arch.last_pte_gfn = gfn;
2102         }
2103 }
2104
2105 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2106 {
2107 }
2108
2109 static struct kvm_memory_slot *
2110 pte_prefetch_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
2111 {
2112         struct kvm_memory_slot *slot;
2113
2114         slot = gfn_to_memslot(vcpu->kvm, gfn);
2115         if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
2116               (no_dirty_log && slot->dirty_bitmap))
2117                 slot = NULL;
2118
2119         return slot;
2120 }
2121
2122 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2123                                      bool no_dirty_log)
2124 {
2125         struct kvm_memory_slot *slot;
2126         unsigned long hva;
2127
2128         slot = pte_prefetch_gfn_to_memslot(vcpu, gfn, no_dirty_log);
2129         if (!slot) {
2130                 get_page(bad_page);
2131                 return page_to_pfn(bad_page);
2132         }
2133
2134         hva = gfn_to_hva_memslot(slot, gfn);
2135
2136         return hva_to_pfn_atomic(vcpu->kvm, hva);
2137 }
2138
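/*
 * Prefetch sptes for up to PTE_PREFETCH_NUM consecutive gfns around
 * the faulting spte.  Only the non-blocking helpers
 * (hva_to_pfn_atomic, gfn_to_page_many_atomic) may be used here, as
 * prefetching runs with mmu_lock held.
 */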
2139 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2140                                     struct kvm_mmu_page *sp,
2141                                     u64 *start, u64 *end)
2142 {
2143         struct page *pages[PTE_PREFETCH_NUM];
2144         unsigned access = sp->role.access;
2145         int i, ret;
2146         gfn_t gfn;
2147
2148         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2149         if (!pte_prefetch_gfn_to_memslot(vcpu, gfn, access & ACC_WRITE_MASK))
2150                 return -1;
2151
2152         ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2153         if (ret <= 0)
2154                 return -1;
2155
2156         for (i = 0; i < ret; i++, gfn++, start++)
2157                 mmu_set_spte(vcpu, start, ACC_ALL,
2158                              access, 0, 0, 1, NULL,
2159                              sp->role.level, gfn,
2160                              page_to_pfn(pages[i]), true, true);
2161
2162         return 0;
2163 }
2164
2165 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2166                                   struct kvm_mmu_page *sp, u64 *sptep)
2167 {
2168         u64 *spte, *start = NULL;
2169         int i;
2170
2171         WARN_ON(!sp->role.direct);
2172
2173         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2174         spte = sp->spt + i;
2175
2176         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2177                 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2178                         if (!start)
2179                                 continue;
2180                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2181                                 break;
2182                         start = NULL;
2183                 } else if (!start)
2184                         start = spte;
2185         }
2186 }
2187
2188 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2189 {
2190         struct kvm_mmu_page *sp;
2191
2192         /*
2193          * Since there is no accessed bit on EPT, there is no way to
2194          * distinguish between actually accessed translations
2195          * and prefetched ones, so disable pte prefetch if EPT is
2196          * enabled.
2197          */
2198         if (!shadow_accessed_mask)
2199                 return;
2200
2201         sp = page_header(__pa(sptep));
2202         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2203                 return;
2204
2205         __direct_pte_prefetch(vcpu, sp, sptep);
2206 }
2207
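/*
 * Install a direct gfn -> pfn mapping at @level, allocating
 * intermediate shadow pages along the walk as needed.  Direct maps
 * (tdp, or nonpaging guests) mirror no guest page table, so the
 * intermediate entries are created with full permissions.
 */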
2208 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2209                         int level, gfn_t gfn, pfn_t pfn)
2210 {
2211         struct kvm_shadow_walk_iterator iterator;
2212         struct kvm_mmu_page *sp;
2213         int pt_write = 0;
2214         gfn_t pseudo_gfn;
2215
2216         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2217                 if (iterator.level == level) {
2218                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
2219                                      0, write, 1, &pt_write,
2220                                      level, gfn, pfn, false, true);
2221                         direct_pte_prefetch(vcpu, iterator.sptep);
2222                         ++vcpu->stat.pf_fixed;
2223                         break;
2224                 }
2225
2226                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
2227                         u64 base_addr = iterator.addr;
2228
2229                         base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2230                         pseudo_gfn = base_addr >> PAGE_SHIFT;
2231                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2232                                               iterator.level - 1,
2233                                               1, ACC_ALL, iterator.sptep);
2234                         if (!sp) {
2235                                 pgprintk("nonpaging_map: ENOMEM\n");
2236                                 kvm_release_pfn_clean(pfn);
2237                                 return -ENOMEM;
2238                         }
2239
2240                         __set_spte(iterator.sptep,
2241                                    __pa(sp->spt)
2242                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
2243                                    | shadow_user_mask | shadow_x_mask
2244                                    | shadow_accessed_mask);
2245                 }
2246         }
2247         return pt_write;
2248 }
2249
2250 static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn)
2251 {
2252         char buf[1];
2253         void __user *hva;
2254         int r;
2255
2256         /* Touch the poisoned page so that a SIGBUS is delivered */
2257         hva = (void __user *)gfn_to_hva(kvm, gfn);
2258         r = copy_from_user(buf, hva, 1);
2259 }
2260
2261 static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2262 {
2263         kvm_release_pfn_clean(pfn);
2264         if (is_hwpoison_pfn(pfn)) {
2265                 kvm_send_hwpoison_signal(kvm, gfn);
2266                 return 0;
2267         } else if (is_fault_pfn(pfn))
2268                 return -EFAULT;
2269
2270         return 1;
2271 }
2272
2273 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
2274 {
2275         int r;
2276         int level;
2277         pfn_t pfn;
2278         unsigned long mmu_seq;
2279
2280         level = mapping_level(vcpu, gfn);
2281
2282         /*
2283          * This path builds a PAE page table, so we can map 2MB pages at
2284          * most.  Therefore check whether the level is larger than that.
2285          */
2286         if (level > PT_DIRECTORY_LEVEL)
2287                 level = PT_DIRECTORY_LEVEL;
2288
2289         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2290
2291         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2292         smp_rmb();
2293         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2294
2295         /* mmio: an error pfn means the gfn has no memslot backing it */
2296         if (is_error_pfn(pfn))
2297                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2298
2299         spin_lock(&vcpu->kvm->mmu_lock);
2300         if (mmu_notifier_retry(vcpu, mmu_seq))
2301                 goto out_unlock;
2302         kvm_mmu_free_some_pages(vcpu);
2303         r = __direct_map(vcpu, v, write, level, gfn, pfn);
2304         spin_unlock(&vcpu->kvm->mmu_lock);
2305
2307         return r;
2308
2309 out_unlock:
2310         spin_unlock(&vcpu->kvm->mmu_lock);
2311         kvm_release_pfn_clean(pfn);
2312         return 0;
2313 }
2314
2315
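/*
 * Drop the references the current roots hold on their shadow pages.
 * A root page that is marked invalid and whose root_count reaches
 * zero is zapped here; valid pages merely lose one reference.
 */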
2316 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2317 {
2318         int i;
2319         struct kvm_mmu_page *sp;
2320         LIST_HEAD(invalid_list);
2321
2322         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2323                 return;
2324         spin_lock(&vcpu->kvm->mmu_lock);
2325         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2326             (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2327              vcpu->arch.mmu.direct_map)) {
2328                 hpa_t root = vcpu->arch.mmu.root_hpa;
2329
2330                 sp = page_header(root);
2331                 --sp->root_count;
2332                 if (!sp->root_count && sp->role.invalid) {
2333                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2334                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2335                 }
2336                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2337                 spin_unlock(&vcpu->kvm->mmu_lock);
2338                 return;
2339         }
2340         for (i = 0; i < 4; ++i) {
2341                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2342
2343                 if (root) {
2344                         root &= PT64_BASE_ADDR_MASK;
2345                         sp = page_header(root);
2346                         --sp->root_count;
2347                         if (!sp->root_count && sp->role.invalid)
2348                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2349                                                          &invalid_list);
2350                 }
2351                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2352         }
2353         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2354         spin_unlock(&vcpu->kvm->mmu_lock);
2355         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2356 }
2357
2358 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2359 {
2360         int ret = 0;
2361
2362         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2363                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2364                 ret = 1;
2365         }
2366
2367         return ret;
2368 }
2369
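/*
 * Direct roots (tdp or nonpaging) are not tied to a guest page table:
 * a single level-4 root is allocated for PT64, or four PAE roots
 * indexed by the top guest address bits.
 */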
2370 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2371 {
2372         struct kvm_mmu_page *sp;
2373         int i;
2374
2375         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2376                 spin_lock(&vcpu->kvm->mmu_lock);
2377                 kvm_mmu_free_some_pages(vcpu);
2378                 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2379                                       1, ACC_ALL, NULL);
2380                 ++sp->root_count;
2381                 spin_unlock(&vcpu->kvm->mmu_lock);
2382                 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2383         } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2384                 for (i = 0; i < 4; ++i) {
2385                         hpa_t root = vcpu->arch.mmu.pae_root[i];
2386
2387                         ASSERT(!VALID_PAGE(root));
2388                         spin_lock(&vcpu->kvm->mmu_lock);
2389                         kvm_mmu_free_some_pages(vcpu);
2390                         sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
2391                                               PT32_ROOT_LEVEL, 1, ACC_ALL,
2392                                               NULL);
2393                         root = __pa(sp->spt);
2394                         ++sp->root_count;
2395                         spin_unlock(&vcpu->kvm->mmu_lock);
2396                         vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2397                 }
2398                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2399         } else
2400                 BUG();
2401
2402         return 0;
2403 }
2404
2405 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2406 {
2407         struct kvm_mmu_page *sp;
2408         u64 pdptr, pm_mask;
2409         gfn_t root_gfn;
2410         int i;
2411
2412         root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2413
2414         if (mmu_check_root(vcpu, root_gfn))
2415                 return 1;
2416
2417         /*
2418          * Do we shadow a long mode page table? If so we need to
2419          * write-protect the guest's page table root.
2420          */
2421         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2422                 hpa_t root = vcpu->arch.mmu.root_hpa;
2423
2424                 ASSERT(!VALID_PAGE(root));
2425
2426                 spin_lock(&vcpu->kvm->mmu_lock);
2427                 kvm_mmu_free_some_pages(vcpu);
2428                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2429                                       0, ACC_ALL, NULL);
2430                 root = __pa(sp->spt);
2431                 ++sp->root_count;
2432                 spin_unlock(&vcpu->kvm->mmu_lock);
2433                 vcpu->arch.mmu.root_hpa = root;
2434                 return 0;
2435         }
2436
2437         /*
2438          * We shadow a 32 bit page table. This may be a legacy 2-level
2439          * or a PAE 3-level page table. In either case we need to be aware that
2440          * the shadow page table may be a PAE or a long mode page table.
2441          */
2442         pm_mask = PT_PRESENT_MASK;
2443         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2444                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2445
2446         for (i = 0; i < 4; ++i) {
2447                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2448
2449                 ASSERT(!VALID_PAGE(root));
2450                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2451                         pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2452                         if (!is_present_gpte(pdptr)) {
2453                                 vcpu->arch.mmu.pae_root[i] = 0;
2454                                 continue;
2455                         }
2456                         root_gfn = pdptr >> PAGE_SHIFT;
2457                         if (mmu_check_root(vcpu, root_gfn))
2458                                 return 1;
2459                 }
2460                 spin_lock(&vcpu->kvm->mmu_lock);
2461                 kvm_mmu_free_some_pages(vcpu);
2462                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2463                                       PT32_ROOT_LEVEL, 0,
2464                                       ACC_ALL, NULL);
2465                 root = __pa(sp->spt);
2466                 ++sp->root_count;
2467                 spin_unlock(&vcpu->kvm->mmu_lock);
2468
2469                 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2470         }
2471         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2472
2473         /*
2474          * If we shadow a 32 bit page table with a long mode page
2475          * table we enter this path.
2476          */
2477         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2478                 if (vcpu->arch.mmu.lm_root == NULL) {
2479                         /*
2480                          * The additional page necessary for this is only
2481                          * allocated on demand.
2482                          */
2483
2484                         u64 *lm_root;
2485
2486                         lm_root = (void *)get_zeroed_page(GFP_KERNEL);
2487                         if (lm_root == NULL)
2488                                 return 1;
2489
2490                         lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2491
2492                         vcpu->arch.mmu.lm_root = lm_root;
2493                 }
2494
2495                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
2496         }
2497
2498         return 0;
2499 }
2500
2501 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2502 {
2503         if (vcpu->arch.mmu.direct_map)
2504                 return mmu_alloc_direct_roots(vcpu);
2505         else
2506                 return mmu_alloc_shadow_roots(vcpu);
2507 }
2508
2509 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2510 {
2511         int i;
2512         struct kvm_mmu_page *sp;
2513
2514         if (vcpu->arch.mmu.direct_map)
2515                 return;
2516
2517         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2518                 return;
2519         if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2520                 hpa_t root = vcpu->arch.mmu.root_hpa;
2521                 sp = page_header(root);
2522                 mmu_sync_children(vcpu, sp);
2523                 return;
2524         }
2525         for (i = 0; i < 4; ++i) {
2526                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2527
2528                 if (root && VALID_PAGE(root)) {
2529                         root &= PT64_BASE_ADDR_MASK;
2530                         sp = page_header(root);
2531                         mmu_sync_children(vcpu, sp);
2532                 }
2533         }
2534 }
2535
2536 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2537 {
2538         spin_lock(&vcpu->kvm->mmu_lock);
2539         mmu_sync_roots(vcpu);
2540         spin_unlock(&vcpu->kvm->mmu_lock);
2541 }
2542
2543 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2544                                   u32 access, u32 *error)
2545 {
2546         if (error)
2547                 *error = 0;
2548         return vaddr;
2549 }
2550
2551 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2552                                          u32 access, u32 *error)
2553 {
2554         if (error)
2555                 *error = 0;
2556         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2557 }
2558
2559 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2560                                 u32 error_code)
2561 {
2562         gfn_t gfn;
2563         int r;
2564
2565         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2566         r = mmu_topup_memory_caches(vcpu);
2567         if (r)
2568                 return r;
2569
2570         ASSERT(vcpu);
2571         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2572
2573         gfn = gva >> PAGE_SHIFT;
2574
2575         return nonpaging_map(vcpu, gva & PAGE_MASK,
2576                              error_code & PFERR_WRITE_MASK, gfn);
2577 }
2578
2579 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2580                                 u32 error_code)
2581 {
2582         pfn_t pfn;
2583         int r;
2584         int level;
2585         gfn_t gfn = gpa >> PAGE_SHIFT;
2586         unsigned long mmu_seq;
2587
2588         ASSERT(vcpu);
2589         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2590
2591         r = mmu_topup_memory_caches(vcpu);
2592         if (r)
2593                 return r;
2594
2595         level = mapping_level(vcpu, gfn);
2596
2597         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2598
2599         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2600         smp_rmb();
2601         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2602         if (is_error_pfn(pfn))
2603                 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2604         spin_lock(&vcpu->kvm->mmu_lock);
2605         if (mmu_notifier_retry(vcpu, mmu_seq))
2606                 goto out_unlock;
2607         kvm_mmu_free_some_pages(vcpu);
2608         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2609                          level, gfn, pfn);
2610         spin_unlock(&vcpu->kvm->mmu_lock);
2611
2612         return r;
2613
2614 out_unlock:
2615         spin_unlock(&vcpu->kvm->mmu_lock);
2616         kvm_release_pfn_clean(pfn);
2617         return 0;
2618 }
2619
2620 static void nonpaging_free(struct kvm_vcpu *vcpu)
2621 {
2622         mmu_free_roots(vcpu);
2623 }
2624
2625 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2626                                   struct kvm_mmu *context)
2627 {
2628         context->new_cr3 = nonpaging_new_cr3;
2629         context->page_fault = nonpaging_page_fault;
2630         context->gva_to_gpa = nonpaging_gva_to_gpa;
2631         context->free = nonpaging_free;
2632         context->prefetch_page = nonpaging_prefetch_page;
2633         context->sync_page = nonpaging_sync_page;
2634         context->invlpg = nonpaging_invlpg;
2635         context->root_level = 0;
2636         context->shadow_root_level = PT32E_ROOT_LEVEL;
2637         context->root_hpa = INVALID_PAGE;
2638         context->direct_map = true;
2639         context->nx = false;
2640         return 0;
2641 }
2642
2643 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2644 {
2645         ++vcpu->stat.tlb_flush;
2646         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2647 }
2648
2649 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2650 {
2651         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2652         mmu_free_roots(vcpu);
2653 }
2654
2655 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2656 {
2657         return vcpu->arch.cr3;
2658 }
2659
2660 static void inject_page_fault(struct kvm_vcpu *vcpu)
2661 {
2662         vcpu->arch.mmu.inject_page_fault(vcpu);
2663 }
2664
2665 static void paging_free(struct kvm_vcpu *vcpu)
2666 {
2667         nonpaging_free(vcpu);
2668 }
2669
2670 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2671 {
2672         int bit7;
2673
2674         bit7 = (gpte >> 7) & 1;
2675         return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2676 }
2677
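/*
 * paging_tmpl.h acts as a C template: it is included twice, once per
 * PTTYPE, generating the paging64_* and paging32_* function families
 * that the init routines below wire into the mmu context.
 */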
2678 #define PTTYPE 64
2679 #include "paging_tmpl.h"
2680 #undef PTTYPE
2681
2682 #define PTTYPE 32
2683 #include "paging_tmpl.h"
2684 #undef PTTYPE
2685
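/*
 * rsvd_bits_mask[x][level-1] holds the reserved-bit mask for a pte at
 * @level; the first index corresponds to bit 7 of the pte (the
 * PSE/large-page bit), matching the lookup in is_rsvd_bits_set()
 * above.
 */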
2686 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2687                                   struct kvm_mmu *context,
2688                                   int level)
2689 {
2690         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2691         u64 exb_bit_rsvd = 0;
2692
2693         if (!context->nx)
2694                 exb_bit_rsvd = rsvd_bits(63, 63);
2695         switch (level) {
2696         case PT32_ROOT_LEVEL:
2697                 /* no rsvd bits for 2 level 4K page table entries */
2698                 context->rsvd_bits_mask[0][1] = 0;
2699                 context->rsvd_bits_mask[0][0] = 0;
2700                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2701
2702                 if (!is_pse(vcpu)) {
2703                         context->rsvd_bits_mask[1][1] = 0;
2704                         break;
2705                 }
2706
2707                 if (is_cpuid_PSE36())
2708                         /* 36-bit PSE 4MB page */
2709                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2710                 else
2711                         /* 32-bit PSE 4MB page */
2712                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2713                 break;
2714         case PT32E_ROOT_LEVEL:
2715                 context->rsvd_bits_mask[0][2] =
2716                         rsvd_bits(maxphyaddr, 63) |
2717                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2718                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2719                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2720                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2721                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2722                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2723                         rsvd_bits(maxphyaddr, 62) |
2724                         rsvd_bits(13, 20);              /* large page */
2725                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2726                 break;
2727         case PT64_ROOT_LEVEL:
2728                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2729                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2730                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2731                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2732                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2733                         rsvd_bits(maxphyaddr, 51);
2734                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2735                         rsvd_bits(maxphyaddr, 51);
2736                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2737                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2738                         rsvd_bits(maxphyaddr, 51) |
2739                         rsvd_bits(13, 29);
2740                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2741                         rsvd_bits(maxphyaddr, 51) |
2742                         rsvd_bits(13, 20);              /* large page */
2743                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2744                 break;
2745         }
2746 }
2747
2748 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2749                                         struct kvm_mmu *context,
2750                                         int level)
2751 {
2752         context->nx = is_nx(vcpu);
2753
2754         reset_rsvds_bits_mask(vcpu, context, level);
2755
2756         ASSERT(is_pae(vcpu));
2757         context->new_cr3 = paging_new_cr3;
2758         context->page_fault = paging64_page_fault;
2759         context->gva_to_gpa = paging64_gva_to_gpa;
2760         context->prefetch_page = paging64_prefetch_page;
2761         context->sync_page = paging64_sync_page;
2762         context->invlpg = paging64_invlpg;
2763         context->free = paging_free;
2764         context->root_level = level;
2765         context->shadow_root_level = level;
2766         context->root_hpa = INVALID_PAGE;
2767         context->direct_map = false;
2768         return 0;
2769 }
2770
2771 static int paging64_init_context(struct kvm_vcpu *vcpu,
2772                                  struct kvm_mmu *context)
2773 {
2774         return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2775 }
2776
2777 static int paging32_init_context(struct kvm_vcpu *vcpu,
2778                                  struct kvm_mmu *context)
2779 {
2780         context->nx = false;
2781
2782         reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2783
2784         context->new_cr3 = paging_new_cr3;
2785         context->page_fault = paging32_page_fault;
2786         context->gva_to_gpa = paging32_gva_to_gpa;
2787         context->free = paging_free;
2788         context->prefetch_page = paging32_prefetch_page;
2789         context->sync_page = paging32_sync_page;
2790         context->invlpg = paging32_invlpg;
2791         context->root_level = PT32_ROOT_LEVEL;
2792         context->shadow_root_level = PT32E_ROOT_LEVEL;
2793         context->root_hpa = INVALID_PAGE;
2794         context->direct_map = false;
2795         return 0;
2796 }
2797
2798 static int paging32E_init_context(struct kvm_vcpu *vcpu,
2799                                   struct kvm_mmu *context)
2800 {
2801         return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2802 }
2803
2804 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2805 {
2806         struct kvm_mmu *context = vcpu->arch.walk_mmu;
2807
2808         context->new_cr3 = nonpaging_new_cr3;
2809         context->page_fault = tdp_page_fault;
2810         context->free = nonpaging_free;
2811         context->prefetch_page = nonpaging_prefetch_page;
2812         context->sync_page = nonpaging_sync_page;
2813         context->invlpg = nonpaging_invlpg;
2814         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2815         context->root_hpa = INVALID_PAGE;
2816         context->direct_map = true;
2817         context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2818         context->get_cr3 = get_cr3;
2819         context->inject_page_fault = kvm_inject_page_fault;
2820         context->nx = is_nx(vcpu);
2821
2822         if (!is_paging(vcpu)) {
2823                 context->nx = false;
2824                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2825                 context->root_level = 0;
2826         } else if (is_long_mode(vcpu)) {
2827                 context->nx = is_nx(vcpu);
2828                 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
2829                 context->gva_to_gpa = paging64_gva_to_gpa;
2830                 context->root_level = PT64_ROOT_LEVEL;
2831         } else if (is_pae(vcpu)) {
2832                 context->nx = is_nx(vcpu);
2833                 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
2834                 context->gva_to_gpa = paging64_gva_to_gpa;
2835                 context->root_level = PT32E_ROOT_LEVEL;
2836         } else {
2837                 context->nx = false;
2838                 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2839                 context->gva_to_gpa = paging32_gva_to_gpa;
2840                 context->root_level = PT32_ROOT_LEVEL;
2841         }
2842
2843         return 0;
2844 }
2845
2846 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
2847 {
2848         int r;
2849         ASSERT(vcpu);
2850         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2851
2852         if (!is_paging(vcpu))
2853                 r = nonpaging_init_context(vcpu, context);
2854         else if (is_long_mode(vcpu))
2855                 r = paging64_init_context(vcpu, context);
2856         else if (is_pae(vcpu))
2857                 r = paging32E_init_context(vcpu, context);
2858         else
2859                 r = paging32_init_context(vcpu, context);
2860
2861         vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
2862         vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
2863
2864         return r;
2865 }
2866 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
2867
2868 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2869 {
2870         int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
2871
2872         vcpu->arch.walk_mmu->set_cr3           = kvm_x86_ops->set_cr3;
2873         vcpu->arch.walk_mmu->get_cr3           = get_cr3;
2874         vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
2875
2876         return r;
2877 }
2878
2879 static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
2880 {
2881         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
2882
2883         g_context->get_cr3           = get_cr3;
2884         g_context->inject_page_fault = kvm_inject_page_fault;
2885
2886         /*
2887          * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
2888          * translation of l2_gpa addresses to l1_gpa is done using the
2889          * arch.nested_mmu.gva_to_gpa function.  In effect, the gva_to_gpa
2890          * functions of mmu and nested_mmu are swapped.
2891          */
2892         if (!is_paging(vcpu)) {
2893                 g_context->nx = false;
2894                 g_context->root_level = 0;
2895                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
2896         } else if (is_long_mode(vcpu)) {
2897                 g_context->nx = is_nx(vcpu);
2898                 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
2899                 g_context->root_level = PT64_ROOT_LEVEL;
2900                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2901         } else if (is_pae(vcpu)) {
2902                 g_context->nx = is_nx(vcpu);
2903                 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
2904                 g_context->root_level = PT32E_ROOT_LEVEL;
2905                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
2906         } else {
2907                 g_context->nx = false;
2908                 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
2909                 g_context->root_level = PT32_ROOT_LEVEL;
2910                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
2911         }
2912
2913         return 0;
2914 }
2915
2916 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2917 {
2918         vcpu->arch.update_pte.pfn = bad_pfn;
2919
2920         if (mmu_is_nested(vcpu))
2921                 return init_kvm_nested_mmu(vcpu);
2922         else if (tdp_enabled)
2923                 return init_kvm_tdp_mmu(vcpu);
2924         else
2925                 return init_kvm_softmmu(vcpu);
2926 }
2927
2928 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2929 {
2930         ASSERT(vcpu);
2931         if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
2932                 /* mmu.free() should set root_hpa = INVALID_PAGE */
2933                 vcpu->arch.mmu.free(vcpu);
2934 }
2935
2936 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2937 {
2938         destroy_kvm_mmu(vcpu);
2939         return init_kvm_mmu(vcpu);
2940 }
2941 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2942
2943 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2944 {
2945         int r;
2946
2947         r = mmu_topup_memory_caches(vcpu);
2948         if (r)
2949                 goto out;
2950         r = mmu_alloc_roots(vcpu);
2951         spin_lock(&vcpu->kvm->mmu_lock);
2952         mmu_sync_roots(vcpu);
2953         spin_unlock(&vcpu->kvm->mmu_lock);
2954         if (r)
2955                 goto out;
2956         /* set_cr3() should ensure TLB has been flushed */
2957         vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2958 out:
2959         return r;
2960 }
2961 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2962
2963 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2964 {
2965         mmu_free_roots(vcpu);
2966 }
2967 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
2968
2969 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2970                                   struct kvm_mmu_page *sp,
2971                                   u64 *spte)
2972 {
2973         u64 pte;
2974         struct kvm_mmu_page *child;
2975
2976         pte = *spte;
2977         if (is_shadow_present_pte(pte)) {
2978                 if (is_last_spte(pte, sp->role.level))
2979                         drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
2980                 else {
2981                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2982                         mmu_page_remove_parent_pte(child, spte);
2983                 }
2984         }
2985         __set_spte(spte, shadow_trap_nonpresent_pte);
2986         if (is_large_pte(pte))
2987                 --vcpu->kvm->stat.lpages;
2988 }
2989
2990 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2991                                   struct kvm_mmu_page *sp,
2992                                   u64 *spte,
2993                                   const void *new)
2994 {
2995         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2996                 ++vcpu->kvm->stat.mmu_pde_zapped;
2997                 return;
2998         }
2999
3000         if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
3001                 return;
3002
3003         ++vcpu->kvm->stat.mmu_pte_updated;
3004         if (!sp->role.cr4_pae)
3005                 paging32_update_pte(vcpu, sp, spte, new);
3006         else
3007                 paging64_update_pte(vcpu, sp, spte, new);
3008 }
3009
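/*
 * A remote TLB flush is needed whenever a pte change could leave a
 * stale, more permissive translation in another vcpu's TLB: present
 * to non-present, a different target frame, or a dropped permission
 * bit.  NX is XOR-inverted first since a set NX bit means *less*
 * access.
 */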
3010 static bool need_remote_flush(u64 old, u64 new)
3011 {
3012         if (!is_shadow_present_pte(old))
3013                 return false;
3014         if (!is_shadow_present_pte(new))
3015                 return true;
3016         if ((old ^ new) & PT64_BASE_ADDR_MASK)
3017                 return true;
3018         old ^= PT64_NX_MASK;
3019         new ^= PT64_NX_MASK;
3020         return (old & ~new & PT64_PERM_MASK) != 0;
3021 }
3022
3023 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
3024                                     bool remote_flush, bool local_flush)
3025 {
3026         if (zap_page)
3027                 return;
3028
3029         if (remote_flush)
3030                 kvm_flush_remote_tlbs(vcpu->kvm);
3031         else if (local_flush)
3032                 kvm_mmu_flush_tlb(vcpu);
3033 }
3034
3035 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
3036 {
3037         u64 *spte = vcpu->arch.last_pte_updated;
3038
3039         return !!(spte && (*spte & shadow_accessed_mask));
3040 }
3041
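/*
 * On a guest pte write, speculatively translate the new gpte's gfn to
 * a pfn before mmu_lock is taken, since gfn_to_pfn() may sleep.  The
 * result is stashed in vcpu->arch.update_pte and is revalidated
 * against mmu_notifier_seq before being used.
 */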
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                          u64 gpte)
{
        gfn_t gfn;
        pfn_t pfn;

        if (!is_present_gpte(gpte))
                return;
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }
        vcpu->arch.update_pte.gfn = gfn;
        vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        u64 *spte = vcpu->arch.last_pte_updated;

        if (spte
            && vcpu->arch.last_pte_gfn == gfn
            && shadow_accessed_mask
            && !(*spte & shadow_accessed_mask)
            && is_shadow_present_pte(*spte))
                set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

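/*
 * Called on every emulated write that hits a shadowed guest page table.
 * Updates the affected sptes in place when the write looks like a real
 * pte update, and zaps the whole shadow page when the write pattern
 * (misalignment, write flooding) suggests the page is no longer used as
 * a page table.
 */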
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes,
                       bool guest_initiated)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        u64 entry, gentry;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        unsigned quadrant;
        int level;
        int flooded = 0;
        int npte;
        int r;
        int invlpg_counter;
        bool remote_flush, local_flush, zap_page;

        zap_page = remote_flush = local_flush = false;

        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

        invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

        /*
         * Assume that the pte write is on a page table of the same type
         * as the current vcpu's paging mode.  This is nearly always true
         * (it might be false while changing modes).  Note that it is
         * verified later by update_pte().
         */
        if ((is_pae(vcpu) && bytes == 4) || !new) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                if (is_pae(vcpu)) {
                        gpa &= ~(gpa_t)7;
                        bytes = 8;
                }
                r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
        }
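        /*
         * Example: a PAE guest updates a 64-bit gpte with two 32-bit
         * stores, say to gpa 0x1000 and 0x1004.  Each store arrives here
         * with bytes == 4; aligning gpa down to 8 bytes and re-reading
         * the full gpte from guest memory reconstructs the complete
         * value, so both halves are always observed together.
         */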

        switch (bytes) {
        case 4:
                gentry = *(const u32 *)new;
                break;
        case 8:
                gentry = *(const u64 *)new;
                break;
        default:
                gentry = 0;
                break;
        }

        mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
        spin_lock(&vcpu->kvm->mmu_lock);
        if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                gentry = 0;
        kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
                        ++vcpu->arch.last_pt_write_count;
                        if (vcpu->arch.last_pt_write_count >= 3)
                                flooded = 1;
                } else {
                        vcpu->arch.last_pt_write_gfn = gfn;
                        vcpu->arch.last_pt_write_count = 1;
                        vcpu->arch.last_pte_updated = NULL;
                }
        }

        mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
                pte_size = sp->role.cr4_pae ? 8 : 4;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }
                page_offset = offset;
                level = sp->role.level;
                npte = 1;
                if (!sp->role.cr4_pae) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
                         * only 2MB.  So we need to double the offset again
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
                                page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
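                        /*
                         * A 4K guest page of 32-bit ptes is shadowed by
                         * more than one shadow page; role.quadrant says
                         * which slice this sp covers.  After doubling,
                         * e.g. offset 0x800 becomes 0x1000, i.e. quadrant
                         * 1, so sps covering other quadrants are skipped.
                         */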
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
                        if (quadrant != sp->role.quadrant)
                                continue;
                }
                local_flush = true;
                spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_pte_write_zap_pte(vcpu, sp, spte);
                        if (gentry &&
                              !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (!remote_flush && need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
                }
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
                kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
                vcpu->arch.update_pte.pfn = bad_pfn;
        }
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa;
        int r;

        if (vcpu->arch.mmu.direct_map)
                return 0;

        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

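/*
 * Recycle shadow pages when the per-VM allowance is nearly exhausted:
 * zap pages from the tail of active_mmu_pages (the least recently
 * created) until at least KVM_REFILL_PAGES are available again.
 */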
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        LIST_HEAD(invalid_list);

        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
                struct kvm_mmu_page *sp;

                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
}

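/*
 * Top-level page fault handler.  The mode-specific handler returns a
 * negative errno (propagated as-is), 0 when the fault has been fixed
 * (return 1 so the guest re-runs the faulting instruction), or a
 * positive value when the access must be emulated.
 */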
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
        int r;
        enum emulation_result er;

        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
        if (r < 0)
                goto out;

        if (!r) {
                r = 1;
                goto out;
        }

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;

        er = emulate_instruction(vcpu, cr2, error_code, 0);

        switch (er) {
        case EMULATE_DONE:
                return 1;
        case EMULATE_DO_MMIO:
                ++vcpu->stat.mmio_exits;
                /* fall through */
        case EMULATE_FAIL:
                return 0;
        default:
                BUG();
        }
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
        vcpu->arch.mmu.invlpg(vcpu, gva);
        kvm_mmu_flush_tlb(vcpu);
        ++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
        tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
        tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
        if (vcpu->arch.mmu.lm_root != NULL)
                free_page((unsigned long)vcpu->arch.mmu.lm_root);
}

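/*
 * pae_root holds the four top-level entries used when shadowing a
 * 32-bit or PAE guest; because such guests load it through a 32-bit
 * cr3, the page must live below 4GB (see the comment below).
 */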
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                return -ENOMEM;

        vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

        return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

        return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}

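/*
 * Write-protect every spte belonging to memslot @slot, e.g. when dirty
 * logging is enabled for it.  Only sptes that are actually writable are
 * touched, and remote TLBs are flushed once at the end.
 */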
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, sp->slot_bitmap))
                        continue;

                pt = sp->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (is_writable_pte(pt[i]))
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);

        spin_lock(&kvm->mmu_lock);
restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
                        goto restart;

        kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);
}

static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
                                               struct list_head *invalid_list)
{
        struct kvm_mmu_page *page;

        page = container_of(kvm->arch.active_mmu_pages.prev,
                            struct kvm_mmu_page, link);
        return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}

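/*
 * Shrinker callback invoked under host memory pressure.  At most one VM
 * gives up shadow pages per invocation; the victim is then rotated to
 * the tail of vm_list so successive calls spread the pain across VMs.
 * Returns the global count of used mmu pages as the cache size estimate.
 */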
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
        struct kvm *kvm;
        struct kvm *kvm_freed = NULL;

        if (nr_to_scan == 0)
                goto out;

        spin_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx, freed_pages;
                LIST_HEAD(invalid_list);

                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                if (!kvm_freed && nr_to_scan > 0 &&
                    kvm->arch.n_used_mmu_pages > 0) {
                        freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
                                                          &invalid_list);
                        kvm_freed = kvm;
                }
                nr_to_scan--;

                kvm_mmu_commit_zap_page(kvm, &invalid_list);
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
        }
        if (kvm_freed)
                list_move_tail(&kvm_freed->vm_list, &vm_list);

        spin_unlock(&kvm_lock);

out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
        .shrink = mmu_shrink,
        .seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
        unregister_shrinker(&mmu_shrinker);
}

int kvm_mmu_module_init(void)
{
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
                                            sizeof(struct kvm_pte_chain),
                                            0, 0, NULL);
        if (!pte_chain_cache)
                goto nomem;
        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
                                            sizeof(struct kvm_rmap_desc),
                                            0, 0, NULL);
        if (!rmap_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
                goto nomem;

        register_shrinker(&mmu_shrinker);

        return 0;

nomem:
        mmu_destroy_caches();
        return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for kvm: a fixed permille of
 * the guest's memory, with a floor of KVM_MIN_ALLOC_MMU_PAGES.  For
 * example, with KVM_PERMILLE_MMU_PAGES == 20, a guest with 262144 pages
 * (1GB) is allowed 262144 * 20 / 1000 = 5242 shadow pages.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
        int i;
        unsigned int nr_mmu_pages;
        unsigned int nr_pages = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++)
                nr_pages += slots->memslots[i].npages;

        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
                        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

        return nr_mmu_pages;
}

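/*
 * Paravirtual MMU operations: a guest using the KVM_HC_MMU_OP hypercall
 * batches pte writes, tlb flushes and page-table releases into a buffer
 * which kvm_pv_mmu_op() copies in and dispatches one entry at a time
 * through kvm_pv_mmu_op_one().
 */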
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        if (len > buffer->len)
                return NULL;
        return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
                                unsigned len)
{
        void *ret;

        ret = pv_mmu_peek_buffer(buffer, len);
        if (!ret)
                return ret;
        buffer->ptr += len;
        buffer->len -= len;
        buffer->processed += len;
        return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
                             gpa_t addr, gpa_t value)
{
        int bytes = 8;
        int r;

        if (!is_long_mode(vcpu) && !is_pae(vcpu))
                bytes = 4;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        if (!emulator_write_phys(vcpu, addr, &value, bytes))
                return -EFAULT;

        return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        (void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
        return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
        spin_lock(&vcpu->kvm->mmu_lock);
        mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
        spin_unlock(&vcpu->kvm->mmu_lock);
        return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
                             struct kvm_pv_mmu_op_buffer *buffer)
{
        struct kvm_mmu_op_header *header;

        header = pv_mmu_peek_buffer(buffer, sizeof *header);
        if (!header)
                return 0;
        switch (header->op) {
        case KVM_MMU_OP_WRITE_PTE: {
                struct kvm_mmu_op_write_pte *wpte;

                wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
                if (!wpte)
                        return 0;
                return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
                                        wpte->pte_val);
        }
        case KVM_MMU_OP_FLUSH_TLB: {
                struct kvm_mmu_op_flush_tlb *ftlb;

                ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
                if (!ftlb)
                        return 0;
                return kvm_pv_mmu_flush_tlb(vcpu);
        }
        case KVM_MMU_OP_RELEASE_PT: {
                struct kvm_mmu_op_release_pt *rpt;

                rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
                if (!rpt)
                        return 0;
                return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
        }
        default: return 0;
        }
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
{
        int r;
        struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

        buffer->ptr = buffer->buf;
        buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
        buffer->processed = 0;

        r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;

        while (buffer->len) {
                r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
                        break;
        }

        r = 1;
out:
        *ret = buffer->processed;
        return r;
}

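/*
 * Record the spte at each level of the shadow (or EPT) walk for @addr
 * into sptes[level - 1], stopping after the first non-present entry.
 * Returns how many levels were captured; used by diagnostics such as
 * the EPT misconfiguration handler in vmx.c.
 */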
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
        struct kvm_shadow_walk_iterator iterator;
        int nr_sptes = 0;

        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, addr, iterator) {
                sptes[iterator.level-1] = *iterator.sptep;
                nr_sptes++;
                if (!is_shadow_present_pte(*iterator.sptep))
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);

        return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#endif