kvm: x86: Skip TLB flush on fast CR3 switch when indicated by guest
author    Junaid Shahid <junaids@google.com>
          Wed, 27 Jun 2018 21:59:15 +0000 (14:59 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 6 Aug 2018 15:58:58 +0000 (17:58 +0200)
When PCIDs are enabled, the most-significant bit (bit 63) of the source
operand for a MOV-to-CR3 instruction indicates that the TLB doesn't need
to be flushed.

This change enables this optimization for MOV-to-CR3s in the guest
that have been intercepted by KVM for shadow paging and are handled
within the fast CR3 switch path.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
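
For context, a hedged sketch of the guest-side behavior this patch honors: a
PCID-aware guest can set bit 63 of the value it loads into CR3 to ask that TLB
entries tagged with the target PCID be preserved. The constant and helper names
below are illustrative assumptions, not identifiers taken from this patch.

	/*
	 * Illustrative only: loading CR3 with bit 63 set (while CR4.PCIDE=1)
	 * tells the CPU, and now KVM's fast CR3 switch path, that TLB entries
	 * for the target PCID do not need to be flushed.
	 */
	#define CR3_NOFLUSH_BIT (1UL << 63)

	static inline void write_cr3_noflush(unsigned long new_cr3)
	{
		asm volatile("mov %0, %%cr3"
			     : : "r" (new_cr3 | CR3_NOFLUSH_BIT) : "memory");
	}
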
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

index 1c253f15f9cd82a697a6a1ed187ad7dcdabd21a3..8b4aa5e7ff924723ee4ea23508f5428164d1db69 100644
@@ -1318,7 +1318,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
                       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
 
 void kvm_enable_tdp(void);
 void kvm_disable_tdp(void);
index 2af15db12ccdc13f1877ad92ad3576d75d044abc..2b7cfc8e41bba49d81ef5b3906f594300f3253d8 100644
@@ -4037,7 +4037,8 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 }
 
 static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
-                           union kvm_mmu_page_role new_role)
+                           union kvm_mmu_page_role new_role,
+                           bool skip_tlb_flush)
 {
        struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
@@ -4070,7 +4071,9 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 
                        kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-                       kvm_x86_ops->tlb_flush(vcpu, true);
+                       if (!skip_tlb_flush)
+                               kvm_x86_ops->tlb_flush(vcpu, true);
+
                        __clear_sp_write_flooding_count(
                                page_header(mmu->root_hpa));
 
@@ -4082,15 +4085,17 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 }
 
 static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
-                             union kvm_mmu_page_role new_role)
+                             union kvm_mmu_page_role new_role,
+                             bool skip_tlb_flush)
 {
-       if (!fast_cr3_switch(vcpu, new_cr3, new_role))
+       if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
                kvm_mmu_free_roots(vcpu, false);
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
 {
-       __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu));
+       __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
+                         skip_tlb_flush);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
@@ -4733,7 +4738,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
        union kvm_mmu_page_role root_page_role =
                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
 
-       __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role);
+       __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
        context->shadow_root_level = PT64_ROOT_4LEVEL;
 
        context->nx = true;
@@ -5196,11 +5201,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
 
+       if (VALID_PAGE(mmu->prev_root.hpa) &&
+           pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
+               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
        ++vcpu->stat.invlpg;
 
        /*
-        * Mappings not reachable via the current cr3 will be synced when
-        * switching to that cr3, so nothing needs to be done here for them.
+        * Mappings not reachable via the current cr3 or the prev_root.cr3 will
+        * be synced when switching to that cr3, so nothing needs to be done
+        * here for them.
         */
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
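
The new checks above compare the PCID being invalidated against the PCID
embedded in prev_root.cr3 via kvm_get_pcid(). A simplified sketch of that
lookup, assuming the hardware CR3 layout (PCID in the low 12 bits when
CR4.PCIDE is set); this is not the exact KVM helper:

	/*
	 * Simplified sketch: when CR4.PCIDE is set, the low 12 bits of CR3
	 * hold the PCID; otherwise the effective PCID is 0.
	 */
	#define CR3_PCID_MASK 0xFFFUL

	static unsigned long pcid_of_cr3(unsigned long cr3, bool pcid_enabled)
	{
		return pcid_enabled ? (cr3 & CR3_PCID_MASK) : 0;
	}
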
index 80be024cb979e3ca7c3133e72d02fc4479c28253..6151418cec323e2f6de0079b9b75119973f0bc10 100644
@@ -8819,10 +8819,14 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }
 
+               if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_root.cr3)
+                   == operand.pcid)
+                       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
                /*
-                * If the current cr3 does not use the given PCID, then nothing
-                * needs to be done here because a resync will happen anyway
-                * before switching to any other CR3.
+                * If neither the current cr3 nor the prev_root.cr3 use the
+                * given PCID, then nothing needs to be done here because a
+                * resync will happen anyway before switching to any other CR3.
                 */
 
                return kvm_skip_emulated_instruction(vcpu);
@@ -11434,7 +11438,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
        }
 
        if (!nested_ept)
-               kvm_mmu_new_cr3(vcpu, cr3);
+               kvm_mmu_new_cr3(vcpu, cr3, false);
 
        vcpu->arch.cr3 = cr3;
        __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
index 7748037b17fd5b953807882292afd7f9c380a5d5..493afbf12e78a6b6ebcbb8e3972c667d9de03ea2 100644
@@ -847,16 +847,21 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+       bool skip_tlb_flush = false;
 #ifdef CONFIG_X86_64
        bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
 
-       if (pcid_enabled)
+       if (pcid_enabled) {
+               skip_tlb_flush = cr3 & CR3_PCID_INVD;
                cr3 &= ~CR3_PCID_INVD;
+       }
 #endif
 
        if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
+               if (!skip_tlb_flush)
+                       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                return 0;
        }
 
@@ -867,7 +872,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                return 1;
 
-       kvm_mmu_new_cr3(vcpu, cr3);
+       kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
        vcpu->arch.cr3 = cr3;
        __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);