/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

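/*
 * Note: gtlb_nv[0] is a simple round-robin "next victim" way counter for
 * guest TLB0; it wraps at the configured associativity and is also
 * exported to the guest via MAS0[NV] on TLB misses.
 */
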
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

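/*
 * Example (illustrative numbers, not a fixed configuration): with a
 * 512-entry, 4-way TLB0 there are 128 sets, so for an effective address
 * ea the set index is (ea >> PAGE_SHIFT) & 127, and set_base is that
 * index * 4, i.e. the flat gtlb_arch[] offset of the set's first way.
 */
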
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}
	return esel;
}

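/*
 * For TLB0, MAS0[ESEL] names a way within the set addressed by
 * MAS2[EPN], so the flat gtlb_arch[] index is set_base + way.  For the
 * fully-associative TLB1 it is simply the entry number.
 */
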
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
		    eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;
		if (eaddr > get_tlb_end(tlbe))
			continue;
		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;
		if (!get_tlb_v(tlbe))
			continue;
		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

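/*
 * An entry matches when the address falls inside [eaddr, end], the entry
 * is valid, the TID matches (TID 0 in the entry is a wildcard matching
 * any PID), and the TS bit matches the address space; callers pass
 * as = -1 to ignore TS, e.g. for tlbivax/tlbilx emulation.
 */
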
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

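/*
 * This mirrors what the hardware does on a TLB miss: MAS4 supplies the
 * default TLBSEL/TSIZE and MAS2 attribute defaults, so the guest's miss
 * handler finds MAS0-MAS6 preloaded and typically only needs to fill in
 * the RPN before executing tlbwe.
 */
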
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
				min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
				max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}

static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
				struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
	       vcpu_e500->tlb1_max_eaddr == end;
}

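/*
 * [tlb1_min_eaddr, tlb1_max_eaddr] is a cheap bounding range over all
 * valid TLB1 entries; kvmppc_e500_tlb_index() uses it to skip the linear
 * TLB1 scan entirely.  A full recalculation is only needed when the
 * entry being removed defines one of the two boundaries.
 */
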
/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
				     struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	/* Clearing MAS1 clears the valid bit, invalidating the entry. */
	gtlbe->mas1 = 0;
	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

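/*
 * Emulates a guest write to MMUCSR0: the TLB0FI/TLB1FI bits request a
 * flash invalidate of the corresponding guest TLB.  Entries with IPROT
 * set survive, because kvmppc_e500_gtlbe_invalidate() bails out on them.
 */
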
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
					     get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

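/*
 * tlbivax encodes its operands in the effective address itself: bit 2
 * is IA ("invalidate all") and bit 3 selects the TLB, which is why the
 * fields are extracted from ea above rather than from MAS registers.
 */
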
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}

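/*
 * tlbre copies the selected guest TLB entry back into the shared MAS
 * registers, refreshing MAS0[NV] with the current next-victim hint.
 */
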
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		/* derive MAS1[TS] from the searched address space, MAS6[SAS] */
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

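/*
 * On a search hit, tlbsx loads the MAS registers from the matching
 * entry.  On a miss it instead preloads MAS0-MAS2 with defaults (victim
 * slot from MAS0/MAS4, search PID/AS from MAS6) so the guest can follow
 * up with a tlbwe to install the missing translation.
 */
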
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;
	int recal = 0;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
		    kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range, otherwise no need to look
		 * in the tlb1 array.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr = get_tlb_eaddr(gtlbe);
		u64 raddr = get_tlb_raddr(gtlbe);

		if (tlbsel == 0) {
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
		}

		/* Premap the faulting page */
		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

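/*
 * Write flow: drop host shadow mappings for the entry being replaced,
 * update the guest-visible entry from the MAS registers (truncating
 * MAS2[EPN] to 32 bits when the guest runs in 32-bit mode), keep the
 * TLB1 min/max range current, and eagerly map the page on the host if
 * the new entry is safe to shadow.
 */
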
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
			       struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid, as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;
	return 0;
}

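/*
 * Example encoding: to translate effective address 0x10000000 in
 * address space 1 for PID 5, userspace would set linear_address to
 * (1ULL << 40) | (5ULL << 32) | 0x10000000.
 */
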
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
		       gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

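/*
 * e.g. for a 64KiB entry, pgmask is 0xffff, so the result combines the
 * entry's real page number with the low 16 bits of the effective
 * address.
 */
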
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
	kfree(vcpu_e500->g2h_tlb1_map);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;
	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;
	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;
	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);
	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_privs;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_privs;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;
	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;
	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;
	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err_privs:
	kfree(privs[0]);
	kfree(privs[1]);
err_put_page:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
err_pages:
	kfree(pages);
	return ret;
}

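/*
 * The guest TLB array lives in user-supplied memory (cfg->array), which
 * is pinned with get_user_pages_fast() and vmap()ed into the kernel:
 * TLB0 entries first, then TLB1.  This is the backend of the
 * KVM_CAP_SW_TLB capability, letting userspace (e.g. QEMU) read and
 * modify guest TLB entries in place without extra ioctls.
 */
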
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	kvmppc_core_flush_tlb(vcpu);
	return 0;
}

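/*
 * KVM_DIRTY_TLB is issued after userspace has modified the shared TLB
 * array directly; we conservatively rebuild the TLB1 range and drop all
 * host shadow mappings rather than tracking individual dirtied entries.
 */
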
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	if (e500_mmu_host_init(vcpu_e500))
		goto err;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	/* Init TLB configuration register */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			       ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
	vcpu->arch.tlbcfg[0] |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			       ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
	vcpu->arch.tlbcfg[1] |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	return -ENOMEM;
}

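/*
 * g2h_tlb1_map holds one bitmap word per guest TLB1 entry, tracking
 * which host TLB1 slots currently shadow that entry so invalidation can
 * target only the affected host entries (see e500_mmu_host.c).
 */
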
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	e500_mmu_host_uninit(vcpu_e500);
}