KVM: x86/xen: Take srcu lock when accessing kvm_memslots()
author		Wanpeng Li <wanpengli@tencent.com>
		Fri, 23 Apr 2021 08:23:20 +0000 (16:23 +0800)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Fri, 23 Apr 2021 21:00:50 +0000 (17:00 -0400)
kvm_memslots() is called by kvm_write_guest_offset_cached(), so the srcu lock
must be held around it. Hoist the srcu lock back out of
kvm_steal_time_set_preempted() into kvm_arch_vcpu_put() so that it also covers
the Xen path, kvm_xen_runstate_set_preempted().
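
For reference, the locking pattern the fix enforces looks like the sketch
below; the helper and its use of a generic gfn_to_hva_cache are illustrative,
not code from this patch:

	/*
	 * Minimal sketch (illustrative helper, not from this patch): any
	 * path that may reach kvm_memslots(), such as
	 * kvm_write_guest_offset_cached(), must run inside an SRCU
	 * read-side critical section on kvm->srcu.
	 */
	static int write_preempted_flag(struct kvm *kvm,
					struct gfn_to_hva_cache *ghc, u8 flag)
	{
		int idx, ret;

		idx = srcu_read_lock(&kvm->srcu);
		/* May revalidate ghc against the current memslots. */
		ret = kvm_write_guest_offset_cached(kvm, ghc, &flag, 0,
						    sizeof(flag));
		srcu_read_unlock(&kvm->srcu, idx);

		return ret;
	}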

Fixes: 30b5c851af7 ("KVM: x86/xen: Add support for vCPU runstate information")
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1619166200-9215-1-git-send-email-wanpengli@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c

index eca63625aee4d826913d944bb98d2f39bd14cce2..ee0dc58ac3a51cb6b8a273b5e28b26395f8f85d8 100644 (file)
@@ -4025,7 +4025,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
        struct kvm_host_map map;
        struct kvm_steal_time *st;
-       int idx;
 
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
@@ -4033,15 +4032,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        if (vcpu->arch.st.preempted)
                return;
 
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-
        if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
                        &vcpu->arch.st.cache, true))
-               goto out;
+               return;
 
        st = map.hva +
                offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4049,20 +4042,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
 
        kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       int idx;
+
        if (vcpu->preempted && !vcpu->arch.guest_state_protected)
                vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
+       /*
+        * Take the srcu lock as memslots will be accessed to check the gfn
+        * cache generation against the memslots generation.
+        */
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvm_xen_msr_enabled(vcpu->kvm))
                kvm_xen_runstate_set_preempted(vcpu);
        else
                kvm_steal_time_set_preempted(vcpu);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        static_call(kvm_x86_vcpu_put)(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
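
Why the lock matters here: the cached-write helpers revalidate the cache
against the live memslots generation, roughly as in this simplified sketch
(the generation fields match the kernel's struct gfn_to_hva_cache and
struct kvm_memslots; the helper itself is a simplification):

	/*
	 * Simplified view of the check inside kvm_write_guest_offset_cached():
	 * kvm_memslots() is an srcu-protected dereference, so the caller
	 * must hold kvm->srcu when the generations are compared.
	 */
	static bool ghc_still_valid(struct kvm *kvm, struct gfn_to_hva_cache *ghc)
	{
		struct kvm_memslots *slots = kvm_memslots(kvm); /* needs srcu */

		return ghc->generation == slots->generation;
	}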