KVM: PPC: Fix issue clearing exit timing counters
author	Bharat Bhushan <r65777@freescale.com>
Fri, 25 Mar 2011 05:02:13 +0000 (10:32 +0530)
committer	Avi Kivity <avi@redhat.com>
Wed, 11 May 2011 11:57:04 +0000 (07:57 -0400)
The following hung-task dump is observed on the host when clearing the exit
timing counters:

[root@p1021mds kvm]# echo -n 'c' > vm1200_vcpu0_timing
INFO: task echo:1276 blocked for more than 120 seconds.
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
echo          D 0ff5bf94     0  1276   1190 0x00000000
Call Trace:
[c2157e40] [c0007908] __switch_to+0x9c/0xc4
[c2157e50] [c040293c] schedule+0x1b4/0x3bc
[c2157e90] [c04032dc] __mutex_lock_slowpath+0x74/0xc0
[c2157ec0] [c00369e4] kvmppc_init_timing_stats+0x20/0xb8
[c2157ed0] [c0036b00] kvmppc_exit_timing_write+0x84/0x98
[c2157ef0] [c00b9f90] vfs_write+0xc0/0x16c
[c2157f10] [c00ba284] sys_write+0x4c/0x90
[c2157f40] [c000e320] ret_from_syscall+0x0/0x3c

        The vcpu->mutex is taken by the kvm vcpu ioctl paths (KVM_RUN etc.),
and the same mutex was taken when clearing the stats in
kvmppc_init_timing_stats(). When the guest is idle, vcpu->mutex is held on
its behalf, so the process clearing the exit timing stats blocks waiting
for the guest to release vcpu->mutex, and a hang results.

        Fix this by using a separate lock for the exit timing stats.
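
        To illustrate the hang and the fix outside the kernel, here is a
minimal userspace sketch (illustration only, not KVM code; the names
vcpu_mutex, exit_timing_lock and timing_count merely mirror the kernel
fields):

        /* Build with: gcc demo.c -o demo -lpthread */
        #include <pthread.h>
        #include <stdio.h>
        #include <unistd.h>

        static pthread_mutex_t vcpu_mutex = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t exit_timing_lock = PTHREAD_MUTEX_INITIALIZER;
        static unsigned long timing_count;

        static void *vcpu_run(void *arg)
        {
                (void)arg;
                /* KVM_RUN holds vcpu->mutex for as long as the guest
                 * runs, including while the guest sits idle. */
                pthread_mutex_lock(&vcpu_mutex);
                sleep(3600);
                pthread_mutex_unlock(&vcpu_mutex);
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, vcpu_run, NULL);
                sleep(1);

                /* Before the fix, clearing the stats took vcpu_mutex
                 * and would block here until the guest stopped.  With a
                 * dedicated lock that only covers the stats fields, the
                 * clear completes immediately. */
                pthread_mutex_lock(&exit_timing_lock);
                timing_count = 0;
                pthread_mutex_unlock(&exit_timing_lock);
                printf("stats cleared while the guest is still running\n");
                return 0;
        }

With a single shared mutex the clear in main() would block behind
vcpu_run(); with the dedicated lock it completes while the guest thread
still holds vcpu_mutex.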

Signed-off-by: Bharat Bhushan <Bharat.Bhushan@freescale.com>
Acked-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/powerpc.c
arch/powerpc/kvm/timing.c

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bba3b9b72a39e4db1b1a149d981214281a708282..890897cee050acea7edbf8c008de7420e8f5c806 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -255,6 +255,7 @@ struct kvm_vcpu_arch {
        u32 dbsr;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
+       struct mutex exit_timing_lock;
        struct kvmppc_exit_timing timing_exit;
        struct kvmppc_exit_timing timing_last_enter;
        u32 last_exit_type;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 99758460efdef8364abb8678cd5d136a70bdb094..ec3d2e75c0a8a6c73a15bfa148701011bc8af912 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -284,6 +284,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
 
+#ifdef CONFIG_KVM_EXIT_TIMING
+       mutex_init(&vcpu->arch.exit_timing_lock);
+#endif
+
        return 0;
 }
 
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index a021f5827a336ce97b6c62ed5b8d0ca625d571bf..18f40fd3e98f43739b64b3dc13864bc34925be49 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -34,8 +34,8 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 {
        int i;
 
-       /* pause guest execution to avoid concurrent updates */
-       mutex_lock(&vcpu->mutex);
+       /* Take a lock to avoid concurrent updates */
+       mutex_lock(&vcpu->arch.exit_timing_lock);
 
        vcpu->arch.last_exit_type = 0xDEAD;
        for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
@@ -49,7 +49,7 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
        vcpu->arch.timing_exit.tv64 = 0;
        vcpu->arch.timing_last_enter.tv64 = 0;
 
-       mutex_unlock(&vcpu->mutex);
+       mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
@@ -65,6 +65,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
                return;
        }
 
+       mutex_lock(&vcpu->arch.exit_timing_lock);
+
        vcpu->arch.timing_count_type[type]++;
 
        /* sum */
@@ -93,6 +95,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
                vcpu->arch.timing_min_duration[type] = duration;
        if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
                vcpu->arch.timing_max_duration[type] = duration;
+
+       mutex_unlock(&vcpu->arch.exit_timing_lock);
 }
 
 void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)