KVM: x86: Prevent host from panicking on shared MSR writes.
author Andy Honig <ahonig@google.com>
Wed, 27 Aug 2014 18:16:44 +0000 (11:16 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 14 Nov 2014 16:47:56 +0000 (08:47 -0800)
commit 8b3c3104c3f4f706e99365c3e0d2aa61b95f969f upstream.

The previous patch blocked invalid writes directly when the MSR
is written.  As a precaution, prevent future similar mistakes by
gracefully handling GPs caused by writes to shared MSRs.

Signed-off-by: Andrew Honig <ahonig@google.com>
[Remove parts obsoleted by Nadav's patch. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 373058c9b75d8d49c48de361e0d120a31c63723d..0312876eadb3c75265ca0ea40c905506fb76a8bb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1011,7 +1011,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8d9d37ff825051b8d8db5c457528e500df9ef748..882d6a95fa1be188270fd9a89b88cde9040039b5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2493,12 +2493,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       u64 old_msr_data = msr->data;
                        msr->data = data;
                        if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
                                preempt_disable();
-                               kvm_set_shared_msr(msr->index, msr->data,
-                                                  msr->mask);
+                               ret = kvm_set_shared_msr(msr->index, msr->data,
+                                                        msr->mask);
                                preempt_enable();
+                               if (ret)
+                                       msr->data = old_msr_data;
                        }
                        break;
                }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e8753555f1446554b44462beb894579f52d4f1cc..33ea3d07005fc15ceaed5609067e36a4b8d32861 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void)
                shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
        unsigned int cpu = smp_processor_id();
        struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+       int err;
 
        if (((value ^ smsr->values[slot].curr) & mask) == 0)
-               return;
+               return 0;
        smsr->values[slot].curr = value;
-       wrmsrl(shared_msrs_global.msrs[slot], value);
+       err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+       if (err)
+               return 1;
+
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
                smsr->registered = true;
        }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
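
For illustration only (not part of the patch): a minimal sketch of the pattern the
fix relies on.  The helper name try_set_msr() is hypothetical; wrmsrl_safe() catches
the #GP raised by a faulting WRMSR through the exception tables and returns non-zero,
so the caller can fall back to a known-good value instead of panicking the host,
which is what the vmx_set_msr() hunk above does with old_msr_data.

	#include <asm/msr.h>

	/*
	 * Hypothetical caller: attempt to program an MSR and roll back to the
	 * previous value if the write faults.  Returns 0 on success, 1 if the
	 * new value was rejected by the CPU.
	 */
	static int try_set_msr(unsigned int msr, u64 old_val, u64 new_val)
	{
		if (wrmsrl_safe(msr, new_val)) {
			/* Faulting write: restore the last known-good value. */
			wrmsrl_safe(msr, old_val);
			return 1;
		}
		return 0;
	}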