KVM: s390: Use common waitqueue
author Christian Borntraeger <borntraeger@de.ibm.com>
Wed, 12 Jun 2013 11:54:55 +0000 (13:54 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 17 Jun 2013 15:09:17 +0000 (17:09 +0200)
Let's use the common waitqueue for kvm cpus on s390. By itself this
is just a cleanup, but it should also improve the accuracy of
diag 0x44, which is implemented via kvm_vcpu_on_spin:
kvm_vcpu_on_spin has an explicit check for vcpus waiting on the
waitqueue and uses it to optimize the yielding.
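
For reference, a simplified sketch of the check the message refers to,
loosely based on the directed-yield loop in virt/kvm/kvm_main.c of that
era (not part of this patch; the pass/boost bookkeeping is omitted, and
exact details vary by kernel version):

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		/* a vcpu sleeping on the common waitqueue is idle,
		 * not spinning on a lock, so don't yield to it */
		if (waitqueue_active(&vcpu->wq))
			continue;
		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}

With local_int.wq now pointing at the common vcpu->wq, an s390 vcpu
sleeping in kvm_s390_handle_wait becomes visible to this check as well.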

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/sigp.c

index 43207dd45fabdf0eb0dd24eb85498f1b3727abd0..d3ffd7eded3cc3efec6b8d559a4f71288030b333 100644 (file)
@@ -228,7 +228,7 @@ struct kvm_s390_local_interrupt {
        atomic_t active;
        struct kvm_s390_float_interrupt *float_int;
        int timer_due; /* event indicator for waitqueue below */
-       wait_queue_head_t wq;
+       wait_queue_head_t *wq;
        atomic_t *cpuflags;
        unsigned int action_bits;
 };
index 5c948177529e281ca7138a1b9bd7ef0186f4cec9..7f35cb33e5102008244b4fd1376954400f2d0009 100644 (file)
@@ -438,7 +438,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
-       add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+       add_wait_queue(&vcpu->wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
@@ -452,7 +452,7 @@ no_timer:
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+       remove_wait_queue(&vcpu->wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
@@ -465,8 +465,8 @@ void kvm_s390_tasklet(unsigned long parm)
 
        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
-       if (waitqueue_active(&vcpu->arch.local_int.wq))
-               wake_up_interruptible(&vcpu->arch.local_int.wq);
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
        spin_unlock(&vcpu->arch.local_int.lock);
 }
 
@@ -613,7 +613,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       BUG_ON(waitqueue_active(&li->wq));
+       BUG_ON(waitqueue_active(li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
 }
@@ -746,8 +746,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&li->wq);
+       if (waitqueue_active(li->wq))
+               wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
@@ -832,8 +832,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&vcpu->arch.local_int.wq);
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
index a3183651ff4508e964c4b3f5c1ed2e1f4bdc8a03..ba694d2ba51e36a2933ad722db76b00e09735f37 100644 (file)
@@ -438,7 +438,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
-       init_waitqueue_head(&vcpu->arch.local_int.wq);
+       vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);
 
index 1c48ab2845e0fdab9ec19fbb03060d4346ee4ffa..033c864f1ae8b0f02859a1c64944cadc904c165f 100644 (file)
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&li->wq);
+       if (waitqueue_active(li->wq))
+               wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&li->wq);
+       if (waitqueue_active(li->wq))
+               wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&li->wq);
+       if (waitqueue_active(li->wq))
+               wake_up_interruptible(li->wq);
 out:
        spin_unlock_bh(&li->lock);
 
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       if (waitqueue_active(&li->wq))
-               wake_up_interruptible(&li->wq);
+       if (waitqueue_active(li->wq))
+               wake_up_interruptible(li->wq);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);