/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

struct id {
	unsigned long val;	/* shadow ID value; 0 means invalid */
	struct id **pentry;	/* points back into the pcpu_sids reverse map */
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> shadow ID on the physical CPU
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};
/*
 * This table provides the reverse mapping of vcpu_id_table:
 * shadow ID --> address of the vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};
static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
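
/*
 * Shadow IDs are handed out by a simple per-core bump allocator:
 * pcpu_last_used_sid climbs from 1 to 255.  Once it would reach
 * NUM_TIDS, allocation fails, the caller flushes the local TLB and all
 * sid mappings (see kvmppc_e500_get_sid()), and counting restarts at 1.
 * For example, a core that has handed out sids 1..255 flushes on the
 * 256th request and then hands out sid 1 again.
 */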
static unsigned int tlb1_entry_num;
/*
 * Allocate a free shadow id and set up a valid sid mapping in the given
 * entry. A mapping is only valid when the vcpu_id_table and pcpu_id_table
 * entries match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}
/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}
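
/*
 * Checking both directions makes stale mappings self-invalidating: if
 * another vcpu has since claimed this sid on this core, the pcpu_sids
 * slot no longer points back at this entry and the lookup fails, with
 * no cross-vcpu bookkeeping required.
 */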
/* Invalidate all id mappings on the local core. */
static inline void local_sid_destroy_all(void)
{
	preempt_disable();
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
	preempt_enable();
}
static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}
/* Invalidate all mappings on the vcpu. */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update the shadow pid since the mappings have changed. */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
/* Invalidate one ID mapping on the vcpu. */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update the shadow pid since the mappings have changed. */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
/*
 * Map a guest (vcpu, AS, ID, PR) tuple to a physical core shadow id.
 * This function first looks up whether a valid mapping already exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update the shadow pid since the mappings may have changed. */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}
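
/*
 * The avoid_recursion flag exists because kvmppc_e500_recalc_shadow_pid()
 * is itself a caller of kvmppc_e500_get_sid(); passing 1 breaks the cycle
 * when the recalc path is the one asking for a sid.
 */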
/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}
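
/*
 * TLB0 victims round-robin over the ways [0..KVM_E500_TLB0_WAY_NUM-1],
 * mirroring the "next victim" (NV) hint that real hardware reports in
 * MAS0 after a miss.
 */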
static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for the magic page */
	return tlb1_entry_num - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
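
/*
 * Worked example: a guest entry with MAS3 = SR|SW taken while the guest
 * is in supervisor mode becomes UR|UW in the shadow entry (each
 * supervisor permission bit in MAS3 sits one position below its user
 * counterpart, hence the << 1).  The supervisor bits are then OR'd back
 * in unconditionally, apparently so the host kernel retains access to
 * the mapping (e.g. during emulation) while the guest context is loaded.
 */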
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	/* Force M (coherence) so shadow mappings stay coherent across cores. */
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
/*
 * Write a shadow tlb entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
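
/*
 * The magic page occupies host TLB1 entry tlbcam_index: shadow TLB1
 * entries are allocated top-down from tlb1_entry_num - 1 (see
 * to_htlb1_esel()), and tlb1_max_shadow_size() stops one entry short,
 * so kvmppc_e500_tlb1_map() can never clobber this slot.
 */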
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* The shadow PID may have expired on the local core. */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
					 int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * or if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}
static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
					  struct tlbe *gtlbe,
					  pfn_t pfn)
{
	priv->pfn = pfn;
	priv->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		priv->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
	if (priv->flags & E500_TLB_VALID) {
		if (priv->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(priv->pfn);
		else
			kvm_release_pfn_clean(priv->pfn);

		priv->flags = 0;
	}
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}
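
/*
 * The raw shifts above extract the default fields from the guest's MAS4:
 * bit 28 is TLBSELD, bits 16-19 are TIDSELD, and bits 7-11 are TSIZED.
 * They seed MAS0/MAS1 so the guest's miss handler sees the same hints
 * that real hardware would provide.
 */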
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_priv *priv,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = priv->pfn;
	unsigned int stid;

	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize)
		| MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
	struct tlbe *stlbe)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	struct tlbe_priv *priv;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop the old priv and set up the new one. */
	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
	kvmppc_e500_priv_release(priv);
	kvmppc_e500_priv_setup(priv, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}
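
/*
 * Worked example for the pfnmap sizing loop above: a guest tsize of 9
 * (2^9 KB = 512K) is first rounded down to 8 (256K), since e500 only
 * implements power-of-4 page sizes.  The loop then steps 256K -> 64K ->
 * 16K -> 4K until the block both fits inside the VMA/memslot overlap
 * and has gfn and pfn mutually aligned (tsize_pages = 1 << (tsize - 2)
 * pages per block).
 */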
/* XXX only map the one-to-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, esel, stlbe);

	return esel;
}
/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
/* XXX for both one-to-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[1]++;

	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->gtlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);

	return victim;
}
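
/*
 * Shadow TLB1 slots are recycled round-robin with no back-invalidation:
 * overwriting an old victim is safe because the evicted translation is
 * simply re-faulted through kvmppc_mmu_map() the next time the guest
 * touches it.
 */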
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalculate the shadow pid since the MSR has changed. */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}
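
/*
 * tlbivax encodes its operation in the effective address itself: EA bit
 * 2 is the "invalidate all" (IA) flag and EA bit 3 selects the TLB
 * array, which is why the code above decodes (ea >> 2) & 1 and
 * (ea >> 3) & 1 rather than any instruction operand.
 */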
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}
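
/*
 * On a miss, tlbsx leaves MAS0-MAS2 primed the same way a real TLB miss
 * would (victim slot plus defaults from MAS4), so the guest can fill in
 * the remaining fields and immediately issue tlbwe.  Note that MAS6's
 * SPID0 field occupies the same bit positions as MAS1's TID, which is
 * why the SPID bits are transferred with a plain mask.
 */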
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (get_tlb_v(gtlbe))
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Pre-map the shadow TLBE now if the new guest entry is host-safe. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		preempt_disable();
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
		preempt_enable();
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
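
/*
 * Example: for a 256 MB entry, get_tlb_bytes() is 0x10000000, so pgmask
 * is 0x0fffffff and the low 28 bits of eaddr are carried over onto the
 * entry's real-address base.
 */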
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	preempt_disable();
	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					priv, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
	}

	write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
	preempt_enable();
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}
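
/*
 * Note: 0xe0004500 & 0xFFFFF000 == 0xe0004000, i.e. a 4K identity map
 * of the page containing the UART (presumably an NS16550-style device
 * at offset 0x4500) that the guest boot wrapper prints through before
 * it manages its own TLB.
 */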
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err_out;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err_out_guest0;

	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[0] == NULL)
		goto err_out_guest1;
	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_priv[1] == NULL)
		goto err_out_priv0;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err_out_priv1;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err_out_priv1:
	kfree(vcpu_e500->gtlb_priv[1]);
err_out_priv0:
	kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
	kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
	kfree(vcpu_e500->gtlb_arch[0]);
err_out:
	return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel, i;

	/* Release all privs. */
	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
			struct tlbe_priv *priv =
				&vcpu_e500->gtlb_priv[stlbsel][i];
			kvmppc_e500_priv_release(priv);
		}

	kvmppc_e500_id_table_free(vcpu_e500);
	/* Free the priv arrays as well as the guest TLB arrays,
	 * matching the allocations in kvmppc_e500_tlb_init(). */
	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}