git.itanic.dy.fi Git - linux-stable/commitdiff
KVM: x86: Introduce helper to handle Hyper-V paravirt TLB flush requests
author: Vitaly Kuznetsov <vkuznets@redhat.com>
Tue, 5 Dec 2023 10:36:21 +0000 (11:36 +0100)
committer: Sean Christopherson <seanjc@google.com>
Thu, 7 Dec 2023 17:34:23 +0000 (09:34 -0800)
As a preparation to making Hyper-V emulation optional, introduce a helper
to handle pending KVM_REQ_HV_TLB_FLUSH requests.

No functional change intended.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Link: https://lore.kernel.org/r/20231205103630.1391318-8-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/hyperv.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/nested.c

index 9d8fa6ba634139448f63a6eb7e665c65bd10d895..77f6549aa5de5f94633c80c0c1dec39381316df8 100644 (file)
@@ -247,6 +247,19 @@ static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
        return kvm_hv_get_assist_page(vcpu);
 }
 
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
+                                                    bool tdp_enabled)
+{
+       /*
+        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
+        * L2's VP_ID upon request from the guest. Make sure we check for
+        * pending entries in the right FIFO upon L1/L2 transition as these
+        * requests are put by other vCPUs asynchronously.
+        *
+        * NOTE(review): @tdp_enabled is npt_enabled (SVM) or enable_ept (VMX)
+        * at the call sites; the request is only raised when the vCPU has a
+        * Hyper-V context (to_hv_vcpu() != NULL) and TDP is in use. The
+        * "transtion" spelling is part of the upstream function name.
+        */
+       if (to_hv_vcpu(vcpu) && tdp_enabled)
+               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+}
+
 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
 
 #endif
index 3fea8c47679e6899742c6f5aa08046da041439a2..74c04102ef01cb10933ce34f36a3eda679d6c50a 100644 (file)
@@ -487,14 +487,8 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
 
 static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
 {
-       /*
-        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
-        * L2's VP_ID upon request from the guest. Make sure we check for
-        * pending entries in the right FIFO upon L1/L2 transition as these
-        * requests are put by other vCPUs asynchronously.
-        */
-       if (to_hv_vcpu(vcpu) && npt_enabled)
-               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+       /* Handle pending Hyper-V TLB flush requests */
+       kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
 
        /*
         * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
index c5ec0ef51ff78fa3baae19092d12057c07476394..382c0746d0693628ad7541ca55d17a26c06860d0 100644 (file)
@@ -1139,14 +1139,8 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       /*
-        * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
-        * L2's VP_ID upon request from the guest. Make sure we check for
-        * pending entries in the right FIFO upon L1/L2 transition as these
-        * requests are put by other vCPUs asynchronously.
-        */
-       if (to_hv_vcpu(vcpu) && enable_ept)
-               kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+       /* Handle pending Hyper-V TLB flush requests */
+       kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
 
        /*
         * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings