kvm: x86: Flush only affected TLB entries in kvm_mmu_invlpg*
author Junaid Shahid <junaids@google.com>
Fri, 29 Jun 2018 20:10:05 +0000 (13:10 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 6 Aug 2018 15:59:01 +0000 (17:59 +0200)
This needs a minor bug fix. The updated patch is as follows.

Thanks,
Junaid

------------------------------------------------------------------------------

kvm_mmu_invlpg() and kvm_mmu_invpcid_gva() only need to flush the TLB
entries for the specific guest virtual address, instead of flushing all
TLB entries associated with the VM.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
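
For orientation before the per-file diffs: the patch adds a per-address flush hook next to the existing full-flush hook in kvm_x86_ops, and the MMU's INVLPG/INVPCID emulation switches from requesting a full TLB flush to calling that hook. The snippet below is a self-contained toy model of that dispatch, not kernel code; every name in it is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;

struct toy_vcpu;

/* Toy ops table: mirrors the shape of the kvm_x86_ops change below. */
struct toy_x86_ops {
	void (*tlb_flush)(struct toy_vcpu *v, bool invalidate_gpa);
	void (*tlb_flush_gva)(struct toy_vcpu *v, gva_t addr);	/* the new hook */
};

struct toy_vcpu {
	const struct toy_x86_ops *ops;
};

static void toy_flush_all(struct toy_vcpu *v, bool invalidate_gpa)
{
	printf("flush ALL guest TLB entries (invalidate_gpa=%d)\n", invalidate_gpa);
}

static void toy_flush_gva(struct toy_vcpu *v, gva_t addr)
{
	printf("flush only the entry for gva 0x%llx\n", (unsigned long long)addr);
}

static const struct toy_x86_ops toy_ops = {
	.tlb_flush	= toy_flush_all,
	.tlb_flush_gva	= toy_flush_gva,
};

/* Before this patch, INVLPG emulation did the equivalent of ops->tlb_flush();
 * after it, only the affected address is invalidated. */
static void toy_mmu_invlpg(struct toy_vcpu *v, gva_t gva)
{
	v->ops->tlb_flush_gva(v, gva);
}

int main(void)
{
	struct toy_vcpu v = { .ops = &toy_ops };

	toy_mmu_invlpg(&v, 0x00007f1234567000ULL);
	return 0;
}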

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 262b0bc64dfca56c432e5bdf9700dc6091232e63..dcdc9150bb76c4a5fcbc4381bbf328b3e8f358ea 100644 (file)
@@ -985,6 +985,14 @@ struct kvm_x86_ops {
 
        void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 
+       /*
+        * Flush any TLB entries associated with the given GVA.
+        * Does not need to flush GPA->HPA mappings.
+        * Can potentially get non-canonical addresses through INVLPGs, which
+        * the implementation may choose to ignore if appropriate.
+        */
+       void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
+
        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
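
A note on the comment above about non-canonical addresses: in 4-level paging a 48-bit virtual address is canonical when bits 63:47 all equal bit 47. The helper below is a minimal illustrative check for that case only; the kernel's is_noncanonical_address(), used in the mmu.c hunk that follows, derives the width from the guest's configuration (including 5-level paging) rather than hard-coding 48 bits.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: sign-extend from bit 47 and compare.  If the result
 * differs from the original value, bits 63:47 were not a proper sign
 * extension and the address is non-canonical under 4-level paging.
 */
static inline bool toy_is_noncanonical_48(uint64_t va)
{
	return (uint64_t)(((int64_t)(va << 16)) >> 16) != va;
}
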
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9446a36a4ab7feebae32f39516aa96011153ba29..f84c194e6db10aa92cc7a52bba1e136ac7819156 100644 (file)
@@ -5226,6 +5226,10 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
+       /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
+       if (is_noncanonical_address(gva, vcpu))
+               return;
+
        mmu->invlpg(vcpu, gva, mmu->root_hpa);
 
        /*
@@ -5242,7 +5246,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
        if (VALID_PAGE(mmu->prev_root.hpa))
                mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
 
-       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+       kvm_x86_ops->tlb_flush_gva(vcpu, gva);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5250,18 +5254,22 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 {
        struct kvm_mmu *mmu = &vcpu->arch.mmu;
+       bool tlb_flush = false;
 
        if (pcid == kvm_get_active_pcid(vcpu)) {
                mmu->invlpg(vcpu, gva, mmu->root_hpa);
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               tlb_flush = true;
        }
 
        if (VALID_PAGE(mmu->prev_root.hpa) &&
            pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) {
                mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               tlb_flush = true;
        }
 
+       if (tlb_flush)
+               kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+
        ++vcpu->stat.invlpg;
 
        /*
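
As background for the kvm_mmu_invpcid_gva() hunk above: per the SDM, when CR4.PCIDE is set the current PCID is the low 12 bits of CR3 (and 0 otherwise), which is what the kvm_get_active_pcid()/kvm_get_pcid() comparisons reduce to. The sketch below is illustrative, not KVM's implementation.

#include <stdint.h>

/* Illustrative sketch of PCID extraction; KVM's kvm_get_pcid() adds its own
 * checks around the same idea. */
#define TOY_CR3_PCID_MASK	0xFFFull

static inline unsigned long toy_get_pcid(uint64_t cr3, int cr4_pcide)
{
	return cr4_pcide ? (unsigned long)(cr3 & TOY_CR3_PCID_MASK) : 0;
}
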
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index be9931cad40962139cbf556dc918a8f92d2cad62..73e27a98456fbd9cccf3bc1a2c7982cedb1302f0 100644 (file)
@@ -5434,6 +5434,13 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
                svm->asid_generation--;
 }
 
+static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       invlpga(gva, svm->vmcb->control.asid);
+}
+
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
 }
@@ -7086,6 +7093,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .set_rflags = svm_set_rflags,
 
        .tlb_flush = svm_flush_tlb,
+       .tlb_flush_gva = svm_flush_tlb_gva,
 
        .run = svm_vcpu_run,
        .handle_exit = handle_exit,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b8121005113370997a084fd3da2bf88e46ba3039..5aea5af02386a64ce9d56b601f6c5d4c0b349cea 100644 (file)
@@ -1992,6 +1992,19 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
                         __loaded_vmcs_clear, loaded_vmcs, 1);
 }
 
+static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
+{
+       if (vpid == 0)
+               return true;
+
+       if (cpu_has_vmx_invvpid_individual_addr()) {
+               __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
+               return true;
+       }
+
+       return false;
+}
+
 static inline void vpid_sync_vcpu_single(int vpid)
 {
        if (vpid == 0)
@@ -4833,6 +4846,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+       int vpid = to_vmx(vcpu)->vpid;
+
+       if (!vpid_sync_vcpu_addr(vpid, addr))
+               vpid_sync_context(vpid);
+
+       /*
+        * If VPIDs are not supported or enabled, then the above is a no-op.
+        * But we don't really need a TLB flush in that case anyway, because
+        * each VM entry/exit includes an implicit flush when VPID is 0.
+        */
+}
+
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
        ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -13603,6 +13630,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .set_rflags = vmx_set_rflags,
 
        .tlb_flush = vmx_flush_tlb,
+       .tlb_flush_gva = vmx_flush_tlb_gva,
 
        .run = vmx_vcpu_run,
        .handle_exit = vmx_handle_exit,
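
One last piece of VMX background for vpid_sync_vcpu_addr() above: the individual-address INVVPID takes a 128-bit in-memory descriptor naming the VPID and the linear address to invalidate, and when the CPU does not advertise that INVVPID type the code falls back to vpid_sync_context(), flushing every entry tagged with the VPID. The layout below follows the Intel SDM; the struct and field names are illustrative, not KVM's.

#include <stdint.h>

/*
 * INVVPID descriptor per the Intel SDM: VPID in bits 15:0, bits 63:16
 * reserved (must be zero), and the linear address in bits 127:64.  KVM's
 * __invvpid() builds an equivalent operand before issuing INVVPID with the
 * VMX_VPID_EXTENT_INDIVIDUAL_ADDR extent used above.
 */
struct toy_invvpid_desc {
	uint64_t vpid : 16;
	uint64_t reserved : 48;		/* must be zero */
	uint64_t linear_address;
};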