Merge remote-tracking branch 'anton/abiv2' into next
arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 818dce344e82a0fa0a94fe7f21eefdb2483fbc68..9f0ad718e4766bd2b114d4314def16a46b304ebe 100644
@@ -28,6 +28,9 @@
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/mmu-hash64.h>
+#include <asm/tm.h>
+
+#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 #ifdef __LITTLE_ENDIAN__
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
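For reference, VCPU_GPRS_TM(reg) resolves to the byte offset of checkpointed
GPR "reg" inside the vcpu structure. A minimal C sketch of the arithmetic,
using a placeholder for the asm-offsets constant VCPU_GPR_TM:

    #define ULONG_SIZE      8       /* sizeof(unsigned long) on ppc64 */
    #define VCPU_GPR_TM     0x1000  /* placeholder; real value comes from asm-offsets */
    #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

    /* e.g. checkpointed r5 lives at VCPU_GPR_TM + 5 * 8 = VCPU_GPR_TM + 40 */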
@@ -75,8 +78,8 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
        /* Restore SPRG3 */
-       ld      r3,PACA_SPRG3(r13)
-       mtspr   SPRN_SPRG3,r3
+       ld      r3,PACA_SPRG_VDSO(r13)
+       mtspr   SPRN_SPRG_VDSO_WRITE,r3
 
        /* Reload the host's PMU registers */
        ld      r3, PACALPPACAPTR(r13)  /* is the host using the PMU? */
@@ -106,8 +109,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, HSTATE_MMCR(r13)
        ld      r4, HSTATE_MMCR + 8(r13)
        ld      r5, HSTATE_MMCR + 16(r13)
+       ld      r6, HSTATE_MMCR + 24(r13)
+       ld      r7, HSTATE_MMCR + 32(r13)
        mtspr   SPRN_MMCR1, r4
        mtspr   SPRN_MMCRA, r5
+       mtspr   SPRN_SIAR, r6
+       mtspr   SPRN_SDAR, r7
+BEGIN_FTR_SECTION
+       ld      r8, HSTATE_MMCR + 40(r13)
+       ld      r9, HSTATE_MMCR + 48(r13)
+       mtspr   SPRN_MMCR2, r8
+       mtspr   SPRN_SIER, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mtspr   SPRN_MMCR0, r3
        isync
 23:
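The loads above read back the host PMU state saved at guest entry; the
offsets imply a flat array of u64 slots. A sketch of that layout, with
illustrative struct and field names (HSTATE_MMCR is the asm-offsets base):

    struct host_pmu_state {
            unsigned long mmcr0;    /* HSTATE_MMCR + 0  */
            unsigned long mmcr1;    /* HSTATE_MMCR + 8  */
            unsigned long mmcra;    /* HSTATE_MMCR + 16 */
            unsigned long siar;     /* HSTATE_MMCR + 24 */
            unsigned long sdar;     /* HSTATE_MMCR + 32 */
            unsigned long mmcr2;    /* HSTATE_MMCR + 40, POWER8 (ARCH_207S) only */
            unsigned long sier;     /* HSTATE_MMCR + 48, POWER8 (ARCH_207S) only */
    };

Note that MMCR0 is deliberately written last, once the other PMU registers
are in place.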
@@ -229,6 +242,12 @@ kvm_novcpu_exit:
  */
        .globl  kvm_start_guest
 kvm_start_guest:
+
+       /* Set the runlatch bit as soon as we wake up from nap */
+       mfspr   r1, SPRN_CTRLF
+       ori     r1, r1, 1
+       mtspr   SPRN_CTRLT, r1
+
        ld      r2,PACATOC(r13)
 
        li      r0,KVM_HWTHREAD_IN_KVM
@@ -296,6 +315,11 @@ kvm_no_guest:
        li      r0, KVM_HWTHREAD_IN_NAP
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
 kvm_do_nap:
+       /* Clear the runlatch bit before napping */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
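Both paths manipulate the runlatch in the low bit of the CTRL register,
read via CTRLF and written via CTRLT. A C sketch, assuming the kernel's
mfspr()/mtspr() accessors (compare ppc64_runlatch_on()/off()):

    static inline void runlatch_set(void)
    {
            unsigned long ctrl = mfspr(SPRN_CTRLF);
            mtspr(SPRN_CTRLT, ctrl | 1);            /* ori    rN, rN, 1 */
    }

    static inline void runlatch_clear(void)
    {
            unsigned long ctrl = mfspr(SPRN_CTRLF);
            mtspr(SPRN_CTRLT, ctrl & ~1UL);         /* clrrdi rN, rN, 1 */
    }

Setting the bit on wakeup and clearing it before nap keeps the thread's
run-state reporting accurate while it idles in the host.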
@@ -597,6 +621,116 @@ BEGIN_FTR_SECTION
  END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+       b       skip_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+
+       /* Turn on TM/FP/VSX/VMX so we can restore them. */
+       mfmsr   r5
+       li      r6, MSR_TM >> 32
+       sldi    r6, r6, 32
+       or      r5, r5, r6
+       ori     r5, r5, MSR_FP
+       oris    r5, r5, (MSR_VEC | MSR_VSX)@h
+       mtmsrd  r5
+
+       /*
+        * The user may change these outside of a transaction, so they must
+        * always be context switched.
+        */
+       ld      r5, VCPU_TFHAR(r4)
+       ld      r6, VCPU_TFIAR(r4)
+       ld      r7, VCPU_TEXASR(r4)
+       mtspr   SPRN_TFHAR, r5
+       mtspr   SPRN_TFIAR, r6
+       mtspr   SPRN_TEXASR, r7
+
+       ld      r5, VCPU_MSR(r4)
+       rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+       beq     skip_tm /* TM not active in guest */
+
+       /* Make sure the failure summary is set, otherwise we'll program check
+        * when we trechkpt.  It's possible that this might not have been set
+        * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+        * host.
+        */
+       oris    r7, r7, (TEXASR_FS)@h
+       mtspr   SPRN_TEXASR, r7
+
+       /*
+        * Load up the checkpointed state for the guest.  This needs to be
+        * done early, as it will blow away any GPRs, VSRs and some of the
+        * SPRs.
+        */
+
+       mr      r31, r4
+       addi    r3, r31, VCPU_FPRS_TM
+       bl      .load_fp_state
+       addi    r3, r31, VCPU_VRS_TM
+       bl      .load_vr_state
+       mr      r4, r31
+       lwz     r7, VCPU_VRSAVE_TM(r4)
+       mtspr   SPRN_VRSAVE, r7
+
+       ld      r5, VCPU_LR_TM(r4)
+       lwz     r6, VCPU_CR_TM(r4)
+       ld      r7, VCPU_CTR_TM(r4)
+       ld      r8, VCPU_AMR_TM(r4)
+       ld      r9, VCPU_TAR_TM(r4)
+       mtlr    r5
+       mtcr    r6
+       mtctr   r7
+       mtspr   SPRN_AMR, r8
+       mtspr   SPRN_TAR, r9
+
+       /*
+        * Load up PPR and DSCR values but don't put them in the actual SPRs
+        * till the last moment to avoid running with userspace PPR and DSCR for
+        * too long.
+        */
+       ld      r29, VCPU_DSCR_TM(r4)
+       ld      r30, VCPU_PPR_TM(r4)
+
+       std     r2, PACATMSCRATCH(r13) /* Save TOC */
+
+       /* Clear MSR[RI] since r1 and r13 are about to be clobbered. */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /* Load GPRs r0-r28 */
+       reg = 0
+       .rept   29
+       ld      reg, VCPU_GPRS_TM(reg)(r31)
+       reg = reg + 1
+       .endr
+
+       mtspr   SPRN_DSCR, r29
+       mtspr   SPRN_PPR, r30
+
+       /* Load final GPRs */
+       ld      29, VCPU_GPRS_TM(29)(r31)
+       ld      30, VCPU_GPRS_TM(30)(r31)
+       ld      31, VCPU_GPRS_TM(31)(r31)
+
+       /* The TM checkpointed state is now set up.  All GPRs are now volatile. */
+       TRECHKPT
+
+       /* Now let's get back the state we need. */
+       HMT_MEDIUM
+       GET_PACA(r13)
+       ld      r29, HSTATE_DSCR(r13)
+       mtspr   SPRN_DSCR, r29
+       ld      r4, HSTATE_KVM_VCPU(r13)
+       ld      r1, HSTATE_HOST_R1(r13)
+       ld      r2, PACATMSCRATCH(r13)
+
+       /* Set the MSR RI since we have our registers back. */
+       li      r5, MSR_RI
+       mtmsrd  r5, 1
+skip_tm:
+#endif
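Two details in the block above are worth restating in C. The rldicl. test
skips the checkpoint reload entirely when MSR[TS] is zero, and TEXASR.FS
is forced on so that trechkpt cannot take a program check. A rough sketch
(helper name illustrative, constants as in asm/reg.h):

    static bool prepare_tm_restore(unsigned long guest_msr,
                                   unsigned long *texasr)
    {
            /* rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 : isolate MSR[TS] */
            if (((guest_msr >> MSR_TS_S_LG) & 3) == 0)
                    return false;           /* TM inactive: skip_tm */

            /* trechkpt program-checks unless the failure summary is set */
            *texasr |= TEXASR_FS;
            return true;
    }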
+
        /* Load guest PMU registers */
        /* R4 is live here (vcpu pointer) */
        li      r3, 1
@@ -704,14 +838,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        ld      r6, VCPU_VTB(r4)
        mtspr   SPRN_IC, r5
        mtspr   SPRN_VTB, r6
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-       ld      r5, VCPU_TFHAR(r4)
-       ld      r6, VCPU_TFIAR(r4)
-       ld      r7, VCPU_TEXASR(r4)
-       mtspr   SPRN_TFHAR, r5
-       mtspr   SPRN_TFIAR, r6
-       mtspr   SPRN_TEXASR, r7
-#endif
        ld      r8, VCPU_EBBHR(r4)
        mtspr   SPRN_EBBHR, r8
        ld      r5, VCPU_EBBRR(r4)
@@ -736,6 +862,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
         * Set the decrementer to the guest decrementer.
         */
        ld      r8,VCPU_DEC_EXPIRES(r4)
+       /* r8 is a host timebase value here, convert to guest TB */
+       ld      r5,HSTATE_KVM_VCORE(r13)
+       ld      r6,VCORE_TB_OFFSET(r5)
+       add     r8,r8,r6
        mftb    r7
        subf    r3,r7,r8
        mtspr   SPRN_DEC,r3
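VCPU_DEC_EXPIRES is kept in host-timebase units, while the guest runs with
the timebase shifted by the vcore's tb_offset, so the expiry has to be
converted before the DEC value is computed. Roughly, in C (field names
follow the asm offsets used here):

    static long guest_dec_ticks(unsigned long dec_expires_host_tb,
                                unsigned long tb_offset)
    {
            unsigned long expires_guest_tb = dec_expires_host_tb + tb_offset;

            return expires_guest_tb - mftb();       /* value for SPRN_DEC */
    }

The exit path further down performs the inverse conversion, subtracting
tb_offset before storing the expiry back into VCPU_DEC_EXPIRES.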
@@ -817,7 +947,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:    mtspr   SPRN_SRR0, r10
        mr      r10,r0
        mtspr   SPRN_SRR1, r11
-       ld      r11, VCPU_INTR_MSR(r4)
+       mr      r9, r4
+       bl      kvmppc_msr_interrupt
 5:
 
 /*
@@ -1098,17 +1229,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
        mftb    r6
        extsw   r5,r5
        add     r5,r5,r6
+       /* r5 is a guest timebase value here, convert to host TB */
+       ld      r3,HSTATE_KVM_VCORE(r13)
+       ld      r4,VCORE_TB_OFFSET(r3)
+       subf    r5,r4,r5
        std     r5,VCPU_DEC_EXPIRES(r9)
 
 BEGIN_FTR_SECTION
        b       8f
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-       /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
-       mfmsr   r8
-       li      r0, 1
-       rldimi  r8, r0, MSR_TM_LG, 63-MSR_TM_LG
-       mtmsrd  r8
-
        /* Save POWER8-specific registers */
        mfspr   r5, SPRN_IAMR
        mfspr   r6, SPRN_PSPB
@@ -1122,14 +1251,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        std     r5, VCPU_IC(r9)
        std     r6, VCPU_VTB(r9)
        std     r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-       mfspr   r5, SPRN_TFHAR
-       mfspr   r6, SPRN_TFIAR
-       mfspr   r7, SPRN_TEXASR
-       std     r5, VCPU_TFHAR(r9)
-       std     r6, VCPU_TFIAR(r9)
-       std     r7, VCPU_TEXASR(r9)
-#endif
        mfspr   r8, SPRN_EBBHR
        std     r8, VCPU_EBBHR(r9)
        mfspr   r5, SPRN_EBBRR
@@ -1387,7 +1508,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        ld      r8,VCORE_TB_OFFSET(r5)
        cmpdi   r8,0
        beq     17f
-       mftb    r6                      /* current host timebase */
+       mftb    r6                      /* current guest timebase */
        subf    r8,r8,r6
        mtspr   SPRN_TBU40,r8           /* update upper 40 bits */
        mftb    r7                      /* check if lower 24 bits overflowed */
@@ -1537,7 +1658,7 @@ kvmppc_hdsi:
        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 1                   /* data fault */
-       bl      .kvmppc_hpte_hv_fault
+       bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
@@ -1557,7 +1678,7 @@ kvmppc_hdsi:
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_DATA_STORAGE
-       ld      r11, VCPU_INTR_MSR(r9)
+       bl      kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:     ld      r7, VCPU_CTR(r9)
        lwz     r8, VCPU_XER(r9)
@@ -1611,7 +1732,7 @@ kvmppc_hisi:
        mr      r4, r10
        mr      r6, r11
        li      r7, 0                   /* instruction fault */
-       bl      .kvmppc_hpte_hv_fault
+       bl      kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
@@ -1626,7 +1747,7 @@ kvmppc_hisi:
 1:     mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_INST_STORAGE
-       ld      r11, VCPU_INTR_MSR(r9)
+       bl      kvmppc_msr_interrupt
        b       fast_interrupt_c_return
 
 3:     ld      r6, VCPU_KVM(r9)        /* not relocated, use VRMA */
@@ -1669,7 +1790,7 @@ sc_1_fast_return:
        mtspr   SPRN_SRR0,r10
        mtspr   SPRN_SRR1,r11
        li      r10, BOOK3S_INTERRUPT_SYSCALL
-       ld      r11, VCPU_INTR_MSR(r9)
+       bl      kvmppc_msr_interrupt
        mr      r4,r9
        b       fast_guest_return
 
@@ -1685,16 +1806,16 @@ hcall_real_fallback:
        .globl  hcall_real_table
 hcall_real_table:
        .long   0               /* 0 - unused */
-       .long   .kvmppc_h_remove - hcall_real_table
-       .long   .kvmppc_h_enter - hcall_real_table
-       .long   .kvmppc_h_read - hcall_real_table
+       .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_read) - hcall_real_table
        .long   0               /* 0x10 - H_CLEAR_MOD */
        .long   0               /* 0x14 - H_CLEAR_REF */
-       .long   .kvmppc_h_protect - hcall_real_table
-       .long   0               /* 0x1c - H_GET_TCE */
-       .long   .kvmppc_h_put_tce - hcall_real_table
+       .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_put_tce) - hcall_real_table
        .long   0               /* 0x24 - H_SET_SPRG0 */
-       .long   .kvmppc_h_set_dabr - hcall_real_table
+       .long   DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
        .long   0               /* 0x2c */
        .long   0               /* 0x30 */
        .long   0               /* 0x34 */
@@ -1710,11 +1831,11 @@ hcall_real_table:
        .long   0               /* 0x5c */
        .long   0               /* 0x60 */
 #ifdef CONFIG_KVM_XICS
-       .long   .kvmppc_rm_h_eoi - hcall_real_table
-       .long   .kvmppc_rm_h_cppr - hcall_real_table
-       .long   .kvmppc_rm_h_ipi - hcall_real_table
+       .long   DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
+       .long   DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
+       .long   DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
        .long   0               /* 0x70 - H_IPOLL */
-       .long   .kvmppc_rm_h_xirr - hcall_real_table
+       .long   DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
 #else
        .long   0               /* 0x64 - H_EOI */
        .long   0               /* 0x68 - H_CPPR */
@@ -1748,7 +1869,7 @@ hcall_real_table:
        .long   0               /* 0xd4 */
        .long   0               /* 0xd8 */
        .long   0               /* 0xdc */
-       .long   .kvmppc_h_cede - hcall_real_table
+       .long   DOTSYM(kvmppc_h_cede) - hcall_real_table
        .long   0               /* 0xe4 */
        .long   0               /* 0xe8 */
        .long   0               /* 0xec */
@@ -1765,11 +1886,11 @@ hcall_real_table:
        .long   0               /* 0x118 */
        .long   0               /* 0x11c */
        .long   0               /* 0x120 */
-       .long   .kvmppc_h_bulk_remove - hcall_real_table
+       .long   DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
        .long   0               /* 0x128 */
        .long   0               /* 0x12c */
        .long   0               /* 0x130 */
-       .long   .kvmppc_h_set_xdabr - hcall_real_table
+       .long   DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
 hcall_real_table_end:
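The DOTSYM() changes make the table ABI-agnostic: ELFv1 reaches code via
the function descriptor's dot-symbol, while ELFv2 calls the symbol
directly. The macro is roughly this (per asm/ppc_asm.h of the era):

    #if defined(_CALL_ELF) && _CALL_ELF == 2
    #define DOTSYM(a)       a               /* ABIv2:  kvmppc_h_enter */
    #else
    #define GLUE(a, b)      a##b
    #define DOTSYM(a)       GLUE(., a)      /* ABIv1: .kvmppc_h_enter */
    #endif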
 
 ignore_hdec:
@@ -1889,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
        /*
        * Take a nap until a decrementer or external or doorbell interrupt
-        * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+        * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+        * runlatch bit before napping.
         */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r0,1
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
@@ -1989,7 +2115,7 @@ kvm_cede_exit:
        /* Try to handle a machine check in real mode */
 machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
-       bl      .kvmppc_realmode_machine_check
+       bl      kvmppc_realmode_machine_check
        nop
        cmpdi   r3, 0           /* continue exiting from guest? */
        ld      r9, HSTATE_KVM_VCPU(r13)
@@ -1997,7 +2123,7 @@ machine_check_realmode:
        beq     mc_cont
        /* If not, deliver a machine check.  SRR0/1 are already set */
        li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-       ld      r11, VCPU_INTR_MSR(r9)
+       bl      kvmppc_msr_interrupt
        b       fast_interrupt_c_return
 
 /*
@@ -2138,8 +2264,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        mfspr   r6,SPRN_VRSAVE
        stw     r6,VCPU_VRSAVE(r31)
        mtlr    r30
-       mtmsrd  r5
-       isync
        blr
 
 /*
@@ -2186,3 +2310,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  */
 kvmppc_bad_host_intr:
        b       .
+
+/*
+ * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
+ * from VCPU_INTR_MSR and is modified based on the required TM state changes.
+ *   r11 has the guest MSR value (in/out)
+ *   r9 has a vcpu pointer (in)
+ *   r0 is used as a scratch register
+ */
+kvmppc_msr_interrupt:
+       rldicl  r0, r11, 64 - MSR_TS_S_LG, 62
+       cmpwi   r0, 2 /* Check if we are in transactional state ... */
+       ld      r11, VCPU_INTR_MSR(r9)
+       bne     1f
+       /* ... if transactional, change to suspended */
+       li      r0, 1
+1:     rldimi  r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+       blr
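In C terms, kvmppc_msr_interrupt does roughly the following: start from
the vcpu's interrupt MSR and carry the TS field across, demoting
transactional (0b10) to suspended (0b01) the way hardware does on
interrupt delivery (helper name illustrative):

    static unsigned long msr_interrupt(unsigned long guest_msr,
                                       unsigned long intr_msr)
    {
            unsigned long ts = (guest_msr >> MSR_TS_S_LG) & 3;

            if (ts == 2)            /* transactional ...     */
                    ts = 1;         /* ... becomes suspended */

            /* rldimi: insert the TS field into the new MSR */
            return (intr_msr & ~(3UL << MSR_TS_S_LG)) |
                   (ts << MSR_TS_S_LG);
    }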