On 64-bit kernels, the Fixed-Point Exception Register (XER) is a 64-bit
field (e.g. in kvm_regs and kvm_vcpu_arch) and in most places it is
accessed as such.

In some places, however, it is accessed as a 32-bit field by a 64-bit
kernel. In some cases this is via a 32-bit load or store instruction
which, depending on endianness, causes either the lower or the upper
32 bits to be missed. In another case it is cast to a u32, causing the
upper 32 bits to be cleared.

This patch corrects those places by extending the access methods to
64 bits.
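
For illustration only (not part of the change itself), a minimal
user-space C sketch of both failure modes is shown below; set_xer_u32()
and set_xer_u64() are hypothetical stand-ins for the kvmppc_set_xer()
accessors:

  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  /* Stand-in for the old accessor: the u32 parameter silently
   * drops the upper 32 bits of the value. */
  static void set_xer_u32(uint64_t *xer, uint32_t val)
  {
          *xer = val;
  }

  /* Widened accessor: the full 64-bit value is preserved. */
  static void set_xer_u64(uint64_t *xer, uint64_t val)
  {
          *xer = val;
  }

  int main(void)
  {
          uint64_t xer, value = 0xdeadbeef00000001ULL;
          uint32_t word;

          set_xer_u32(&xer, value);
          printf("u32 accessor stores: 0x%016llx\n", (unsigned long long)xer);

          set_xer_u64(&xer, value);
          printf("u64 accessor stores: 0x%016llx\n", (unsigned long long)xer);

          /* A 32-bit load at the field's base address, as lwz would do:
           * little-endian sees 0x00000001 (upper half missed),
           * big-endian sees 0xdeadbeef (lower half missed). */
          memcpy(&word, &xer, sizeof(word));
          printf("32-bit load sees   : 0x%08x\n", word);
          return 0;
  }

Widening the accessor parameters and using 64-bit loads and stores
(ld/std, PPC_LL/PPC_STL) avoids both problems.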
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
bool in_use;
ulong gpr[14];
u32 cr;
- u32 xer;
+ ulong xer;
ulong ctr;
ulong lr;
ulong pc;
return vcpu->arch.cr;
}
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.xer = val;
}
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.xer;
}
blt hdec_soon
ld r6, VCPU_CTR(r4)
- lwz r7, VCPU_XER(r4)
+ ld r7, VCPU_XER(r4)
mtctr r6
mtxer r7
mfctr r3
mfxer r4
std r3, VCPU_CTR(r9)
- stw r4, VCPU_XER(r9)
+ std r4, VCPU_XER(r9)
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
bl kvmppc_msr_interrupt
fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
- lwz r8, VCPU_XER(r9)
+ ld r8, VCPU_XER(r9)
mtctr r7
mtxer r8
mr r4, r9
PPC_LL r8, SVCPU_CTR(r3)
PPC_LL r9, SVCPU_LR(r3)
lwz r10, SVCPU_CR(r3)
- lwz r11, SVCPU_XER(r3)
+ PPC_LL r11, SVCPU_XER(r3)
mtctr r8
mtlr r9
mfctr r8
mflr r9
- stw r5, SVCPU_XER(r13)
+ PPC_STL r5, SVCPU_XER(r13)
PPC_STL r6, SVCPU_FAULT_DAR(r13)
stw r7, SVCPU_FAULT_DSISR(r13)
PPC_STL r8, SVCPU_CTR(r13)