git.itanic.dy.fi Git - linux-stable/commitdiff
KVM: nVMX: Keep track of hv_vm_id/hv_vp_id when eVMCS is in use
author: Vitaly Kuznetsov <vkuznets@redhat.com>
Tue, 1 Nov 2022 14:53:57 +0000 (15:53 +0100)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 18 Nov 2022 17:59:11 +0000 (12:59 -0500)
To handle L2 TLB flush requests, KVM needs to keep track of the L2 VM_ID/VP_ID
pair, which is set by the L1 hypervisor. The 'partition assist page' address is
also needed to handle the post-flush exit to L1 upon request.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-20-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/nested.c

index 4596f19f927b1b79f507083a118988d6a54cb2be..63dad1e129698043e208debc4488730407166ad1 100644 (file)
@@ -668,6 +668,12 @@ struct kvm_vcpu_hv {
 
        /* Preallocated buffer for handling hypercalls passing sparse vCPU set */
        u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
+
+       struct {
+               u64 pa_page_gpa;
+               u64 vm_id;
+               u32 vp_id;
+       } nested;
 };
 
 /* Xen HVM per vcpu emulation context */
index 048b2c3e3b3fc93f4c9c6c97ef9b11d85460a568..cce68fd5befbc0a7850478f425db630a1767a7f7 100644 (file)
@@ -225,6 +225,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 
 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
@@ -233,6 +234,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
        }
 
        vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
+
+       if (hv_vcpu) {
+               hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
+               hv_vcpu->nested.vm_id = 0;
+               hv_vcpu->nested.vp_id = 0;
+       }
 }
 
 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -1557,11 +1564,19 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
 {
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
 
        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
        vmcs12->tpr_threshold = evmcs->tpr_threshold;
        vmcs12->guest_rip = evmcs->guest_rip;
 
+       if (unlikely(!(hv_clean_fields &
+                      HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
+               hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
+               hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
+               hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
+       }
+
        if (unlikely(!(hv_clean_fields &
                       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
                vmcs12->guest_rsp = evmcs->guest_rsp;