KVM: PPC: Convert MSR to shared page
author Alexander Graf <agraf@suse.de>
Thu, 29 Jul 2010 12:47:43 +0000 (14:47 +0200)
committer Avi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:50:43 +0000 (10:50 +0200)
One of the most obvious registers to share with the guest directly is the
MSR. The MSR contains the "interrupts enabled" flag, which the guest has to
toggle in critical sections.

So, to bring down the overhead of enabling and disabling interrupts, let's
put the MSR into the shared page. Keep in mind that even though the guest can
fully read its contents, writing to it doesn't always update all state: only
a few safe fields take effect without hypervisor interaction. See the
documentation for the list of MSR bits that are safe to set from inside the
guest.
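
With the MSR in the shared page, a paravirtualized guest can mask interrupts
with a plain store instead of a trapping mtmsr. The following is only a
minimal guest-side sketch with hypothetical names (the mapping of the shared
page and the real accessors belong to separate patches, and delivery of a
pending interrupt on re-enable still requires a trap, which is ignored here):

	/* Hypothetical pointer to this vcpu's shared page, mapped at boot. */
	static struct kvm_vcpu_arch_shared *kvm_shared;

	static inline void pv_irq_disable(void)
	{
		/* Clearing MSR_EE is one of the "safe" bits: no hypervisor
		 * interaction needed, the host picks up the new value from
		 * the shared page on the next exit. */
		kvm_shared->msr &= ~MSR_EE;
	}

	static inline void pv_irq_enable(void)
	{
		kvm_shared->msr |= MSR_EE;
	}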

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
18 files changed:
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_para.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/44x_tlb.c
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_32_mmu.c
arch/powerpc/kvm/book3s_32_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke.h
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/booke_interrupts.S
arch/powerpc/kvm/e500_tlb.c
arch/powerpc/kvm/e500_tlb.h
arch/powerpc/kvm/powerpc.c

index 53edacdf6940635da9529701201ada1edf1d3b4b..ba20f90655f38b2a8214df6dd6b626a5286e698c 100644 (file)
@@ -211,7 +211,6 @@ struct kvm_vcpu_arch {
        u32 cr;
 #endif
 
-       ulong msr;
 #ifdef CONFIG_PPC_BOOK3S
        ulong shadow_msr;
        ulong hflags;
index 1485ba87a52aa47de4491ea628633fb79e64f020..a17dc5229d9971fabc19bc41d63b311fa13fc476 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 
 struct kvm_vcpu_arch_shared {
+       __u64 msr;
 };
 
 #ifdef __KERNEL__
index 60e7db4c13af0720151864a75b2292a4aab9c986..1221bcdff52fb3df7260146437030f37f5f5c082 100644 (file)
@@ -394,13 +394,13 @@ int main(void)
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-       DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
        DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
        DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
        DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
        DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+       DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
        /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
index 9b9b5cdea840bcdfb87128687dab6f0d41726446..9f71b8d6eb0d904911427ed6052e2bbf40ab6929 100644 (file)
@@ -221,14 +221,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
@@ -354,7 +354,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 
        stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
        stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-                                                   vcpu->arch.msr & MSR_PR);
+                                                   vcpu->arch.shared->msr & MSR_PR);
        stlbe.tid = !(asid & 0xff);
 
        /* Keep track of the reference so we can properly release it later. */
@@ -423,7 +423,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
        /* Does it match current guest AS? */
        /* XXX what about IS != DS? */
-       if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+       if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
                return 0;
 
        gpa = get_tlb_raddr(tlbe);
index b3385dd6f28d0dde4845a88a07dea1f80f633586..2efe69240e1b1e47a62310b8876f8aa019f775f0 100644 (file)
@@ -115,31 +115,31 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.shadow_msr = vcpu->arch.msr;
+       ulong smsr = vcpu->arch.shared->msr;
+
        /* Guest MSR values */
-       vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-                                MSR_BE | MSR_DE;
+       smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
        /* Process MSR values */
-       vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-                                MSR_EE;
+       smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
-       vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+       smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-       vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+       smsr |= MSR_ISF | MSR_HV;
 #endif
+       vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-       ulong old_msr = vcpu->arch.msr;
+       ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
        msr &= to_book3s(vcpu)->msr_mask;
-       vcpu->arch.msr = msr;
+       vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);
 
        if (msr & (MSR_WE|MSR_POW)) {
@@ -149,21 +149,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                }
        }
 
-       if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+       if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
 
        /* Preload FPU if it's enabled */
-       if (vcpu->arch.msr & MSR_FP)
+       if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
        vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-       vcpu->arch.srr1 = vcpu->arch.msr | flags;
+       vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
        kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -254,11 +254,11 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
-               deliver = vcpu->arch.msr & MSR_EE;
+               deliver = vcpu->arch.shared->msr & MSR_EE;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
-               deliver = vcpu->arch.msr & MSR_EE;
+               deliver = vcpu->arch.shared->msr & MSR_EE;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -437,7 +437,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                         struct kvmppc_pte *pte)
 {
-       int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+       int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
        int r;
 
        if (relocated) {
@@ -545,8 +545,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
-       bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-       bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+       bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+       bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;
 
        relocated = data ? dr : ir;
@@ -563,7 +563,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                pte.vpage = eaddr >> 12;
        }
 
-       switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
@@ -571,7 +571,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-               if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+               if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -596,14 +596,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                /* Page not found in guest PTE entries */
                vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-               vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+               vcpu->arch.shared->msr |=
+                       (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
                to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-               vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+               vcpu->arch.shared->msr |=
+                       (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
@@ -695,9 +697,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
-               vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-               vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-               vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+               ulong msr = vcpu->arch.shared->msr;
+
+               msr = kvmppc_set_field(msr, 33, 33, 1);
+               msr = kvmppc_set_field(msr, 34, 36, 0);
+               vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }
@@ -736,7 +740,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;
 
-       if (!(vcpu->arch.msr & msr)) {
+       if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }
@@ -804,7 +808,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        if ((exit_nr != 0x900) && (exit_nr != 0x500))
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-                       vcpu->arch.msr);
+                       vcpu->arch.shared->msr);
 #endif
        kvm_resched(vcpu);
        switch (exit_nr) {
@@ -836,7 +840,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
-                       vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+                       vcpu->arch.shared->msr |=
+                               to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
@@ -904,7 +909,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
                flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-               if (vcpu->arch.msr & MSR_PR) {
+               if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -1052,7 +1057,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
-       regs->msr = vcpu->arch.msr;
+       regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
@@ -1353,7 +1358,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        local_irq_enable();
 
        /* Preload FPU if it's enabled */
-       if (vcpu->arch.msr & MSR_FP)
+       if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
        ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
index 3292d76101d2eefa6808b0bf72dd81c3fed1eba5..449bce5f021aa9d3416c2700ad3474ee8c653faf 100644 (file)
@@ -133,7 +133,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                else
                        bat = &vcpu_book3s->ibat[i];
 
-               if (vcpu->arch.msr & MSR_PR) {
+               if (vcpu->arch.shared->msr & MSR_PR) {
                        if (!bat->vp)
                                continue;
                } else {
@@ -214,8 +214,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                        pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
                        pp = pteg[i+1] & 3;
 
-                       if ((sre->Kp &&  (vcpu->arch.msr & MSR_PR)) ||
-                           (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+                       if ((sre->Kp &&  (vcpu->arch.shared->msr & MSR_PR)) ||
+                           (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
                                pp |= 4;
 
                        pte->may_write = false;
@@ -334,7 +334,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
        struct kvmppc_sr *sr;
        u64 gvsid = esid;
 
-       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                sr = find_sr(to_book3s(vcpu), ea);
                if (sr->valid)
                        gvsid = sr->vsid;
@@ -343,7 +343,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
        /* In case we only have one of MSR_IR or MSR_DR set, let's put
           that in the real-mode context (and hope RM doesn't access
           high memory) */
-       switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                *vsid = VSID_REAL | esid;
                break;
@@ -363,7 +363,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                BUG();
        }
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                *vsid |= VSID_PR;
 
        return 0;
index 0b51ef872c1e629d7c9697694c029ca6524df4df..67b8c38d932f6649c6beb60ba171366fec783eba 100644 (file)
@@ -86,7 +86,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;
 
        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -253,7 +253,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
        u16 sid_map_mask;
        static int backwards_map = 0;
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;
 
        /* We might get collisions that trap in preceding order, so let's
index 4025ea26b3c1df7c8f27dd38e1ddddc1087b38ed..58aa8409dae09a669d1fa5d0014aedd5ed2c065a 100644 (file)
@@ -180,9 +180,9 @@ do_second:
                goto no_page_found;
        }
 
-       if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+       if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
                key = 4;
-       else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+       else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
                key = 4;
 
        for (i=0; i<16; i+=2) {
@@ -381,7 +381,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
        for (i = 1; i < vcpu_book3s->slb_nr; i++)
                vcpu_book3s->slb[i].valid = false;
 
-       if (vcpu->arch.msr & MSR_IR) {
+       if (vcpu->arch.shared->msr & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
@@ -446,13 +446,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
 
-       if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
                if (slb)
                        gvsid = slb->vsid;
        }
 
-       switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+       switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                *vsid = VSID_REAL | esid;
                break;
@@ -473,7 +473,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                break;
        }
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                *vsid |= VSID_PR;
 
        return 0;
index 384179a5002b9f2bdad7c188651689550edfde5b..71c1f9027abb4fe529f258b5dc21fd8f2e7d6ccc 100644 (file)
@@ -66,7 +66,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;
 
        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -191,7 +191,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
        u16 sid_map_mask;
        static int backwards_map = 0;
 
-       if (vcpu->arch.msr & MSR_PR)
+       if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;
 
        /* We might get collisions that trap in preceding order, so let's
index c85f906038ce8d4441a841721d046e7de3611c8f..35d3c16b293849a7aaf3ba7399399c0055d9e6d2 100644 (file)
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
        case 31:
                switch (get_xop(inst)) {
                case OP_31_XOP_MFMSR:
-                       kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
+                       kvmppc_set_gpr(vcpu, get_rt(inst),
+                                      vcpu->arch.shared->msr);
                        break;
                case OP_31_XOP_MTMSRD:
                {
                        ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
                        if (inst & 0x10000) {
-                               vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
-                               vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
+                               vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
+                               vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
                        } else
                                kvmppc_set_msr(vcpu, rs);
                        break;
@@ -204,7 +205,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
                        addr = (ra + rb) & ~31ULL;
-                       if (!(vcpu->arch.msr & MSR_SF))
+                       if (!(vcpu->arch.shared->msr & MSR_SF))
                                addr &= 0xffffffff;
                        vaddr = addr;
 
index 474f2e24050a03a3d89ca85a7b34993d687a61f3..626e6efaa79f4dc5b6494c21c37ab737f42ff0e1 100644 (file)
@@ -165,9 +165,10 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
 {
        u64 dsisr;
+       struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
 
-       vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
-       vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+       shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
+       shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
        vcpu->arch.dear = eaddr;
        /* Page Fault */
        dsisr = kvmppc_set_field(0, 33, 33, 1);
@@ -658,7 +659,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
        if (!kvmppc_inst_is_paired_single(vcpu, inst))
                return EMULATE_FAIL;
 
-       if (!(vcpu->arch.msr & MSR_FP)) {
+       if (!(vcpu->arch.shared->msr & MSR_FP)) {
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
                return EMULATE_AGAIN;
        }
index 8d4e35f5372c85d942d373b89a638f0189093c69..4ec9d49a1cb945d7fbf959bdf94ede5f33e11aae 100644 (file)
@@ -62,7 +62,7 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 {
        int i;
 
-       printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
+       printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
 
@@ -169,34 +169,34 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
-               allowed = vcpu->arch.msr & MSR_CE;
+               allowed = vcpu->arch.shared->msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
-               allowed = vcpu->arch.msr & MSR_ME;
+               allowed = vcpu->arch.shared->msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
-               allowed = vcpu->arch.msr & MSR_EE;
+               allowed = vcpu->arch.shared->msr & MSR_EE;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
-               allowed = vcpu->arch.msr & MSR_DE;
+               allowed = vcpu->arch.shared->msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }
 
        if (allowed) {
                vcpu->arch.srr0 = vcpu->arch.pc;
-               vcpu->arch.srr1 = vcpu->arch.msr;
+               vcpu->arch.srr1 = vcpu->arch.shared->msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear == true)
                        vcpu->arch.dear = vcpu->arch.queued_dear;
-               kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
+               kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
                clear_bit(priority, &vcpu->arch.pending_exceptions);
        }
@@ -265,7 +265,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                break;
 
        case BOOKE_INTERRUPT_PROGRAM:
-               if (vcpu->arch.msr & MSR_PR) {
+               if (vcpu->arch.shared->msr & MSR_PR) {
                        /* Program traps generated by user-level software must be handled
                         * by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
@@ -467,7 +467,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pc = 0;
-       vcpu->arch.msr = 0;
+       vcpu->arch.shared->msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
        vcpu->arch.shadow_pid = 1;
@@ -490,7 +490,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
-       regs->msr = vcpu->arch.msr;
+       regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.srr0;
        regs->srr1 = vcpu->arch.srr1;
        regs->pid = vcpu->arch.pid;
index d59bcca1f9d8af1fa962222043a3955c68e2fe06..88258acc98fabb1067dc9ec580c1bbe2500b0398 100644 (file)
@@ -54,12 +54,12 @@ extern unsigned long kvmppc_booke_handlers;
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
-       if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+       if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
                kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
-       vcpu->arch.msr = new_msr;
+       vcpu->arch.shared->msr = new_msr;
 
-       if (vcpu->arch.msr & MSR_WE) {
+       if (vcpu->arch.shared->msr & MSR_WE) {
                kvm_vcpu_block(vcpu);
                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
        };
index cbc790ee192892fcc19f7a7d856fb1a2db6da2b6..b115203ac1185fefc67de79417798b9b383a7100 100644 (file)
@@ -62,7 +62,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                case OP_31_XOP_MFMSR:
                        rt = get_rt(inst);
-                       kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
+                       kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
                        kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
                        break;
 
@@ -74,13 +74,13 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
                case OP_31_XOP_WRTEE:
                        rs = get_rs(inst);
-                       vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+                       vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                                        | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
                        kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
                        break;
 
                case OP_31_XOP_WRTEEI:
-                       vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+                       vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                                                         | (inst & MSR_EE);
                        kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
                        break;
index 380a78cf484dc2bdce54ef4b23b26eeb60d2e264..049846911ce4d18d8097507b9be0caa8d767733c 100644 (file)
@@ -415,7 +415,8 @@ lightweight_exit:
        lwz     r8, VCPU_GPR(r8)(r4)
        lwz     r3, VCPU_PC(r4)
        mtsrr0  r3
-       lwz     r3, VCPU_MSR(r4)
+       lwz     r3, VCPU_SHARED(r4)
+       lwz     r3, VCPU_SHARED_MSR(r3)
        oris    r3, r3, KVMPPC_MSR_MASK@h
        ori     r3, r3, KVMPPC_MSR_MASK@l
        mtsrr1  r3
index 21011e12caeb9e159e258785b0f8709fe52bed2c..092a390876f3337edf670d380eb7e706dbc238da 100644 (file)
@@ -314,10 +314,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN)
                | e500_shadow_mas2_attrib(gtlbe->mas2,
-                               vcpu_e500->vcpu.arch.msr & MSR_PR);
+                               vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
        stlbe->mas3 = (hpaddr & MAS3_RPN)
                | e500_shadow_mas3_attrib(gtlbe->mas3,
-                               vcpu_e500->vcpu.arch.msr & MSR_PR);
+                               vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
        stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
 
        trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
@@ -576,28 +576,28 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
 }
 
 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
 {
-       unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+       unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
 }
index d28e3010a5e22f3e43c9f47f4e444e8ab2071cd2..458946b4775d4eb747aff191412991423359656e 100644 (file)
@@ -171,7 +171,7 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
        /* Does it match current guest AS? */
        /* XXX what about IS != DS? */
-       if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+       if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
                return 0;
 
        gpa = get_tlb_raddr(tlbe);
index 72a4ad86ee91f4f8900149f925df0eac755bcd10..22f6fa2982f28fb865bc09f60cacfafe9a2b0cb1 100644 (file)
@@ -38,7 +38,8 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-       return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
+       return !(v->arch.shared->msr & MSR_WE) ||
+              !!(v->arch.pending_exceptions);
 }