x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
author Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thu, 21 Jun 2018 02:01:22 +0000 (22:01 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Aug 2018 15:37:29 +0000 (17:37 +0200)
commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream.

The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter to allow storing the
MSR only in the guest (ENTRY) MSR array.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/vmx.c

index 5e908d7a3fbc5e4c47cea66454a27f4db1b3e36d..65cdc92fd3c6ac27388c366e34dfc6f52acae93c 100644
@@ -2495,9 +2495,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-                                 u64 guest_val, u64 host_val)
+                                 u64 guest_val, u64 host_val, bool entry_only)
 {
-       int i, j;
+       int i, j = 0;
        struct msr_autoload *m = &vmx->msr_autoload;
 
        switch (msr) {
@@ -2533,7 +2533,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
        }
 
        i = find_msr(&m->guest, msr);
-       j = find_msr(&m->host, msr);
+       if (!entry_only)
+               j = find_msr(&m->host, msr);
+
        if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
@@ -2543,12 +2545,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                i = m->guest.nr++;
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
        }
+       m->guest.val[i].index = msr;
+       m->guest.val[i].value = guest_val;
+
+       if (entry_only)
+               return;
+
        if (j < 0) {
                j = m->host.nr++;
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
        }
-       m->guest.val[i].index = msr;
-       m->guest.val[i].value = guest_val;
        m->host.val[j].index = msr;
        m->host.val[j].value = host_val;
 }
@@ -2594,7 +2600,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
                        guest_efer &= ~EFER_LME;
                if (guest_efer != host_efer)
                        add_atomic_switch_msr(vmx, MSR_EFER,
-                                             guest_efer, host_efer);
+                                             guest_efer, host_efer, false);
                return false;
        } else {
                guest_efer &= ~ignore_bits;
@@ -4057,7 +4063,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.ia32_xss = data;
                if (vcpu->arch.ia32_xss != host_xss)
                        add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-                               vcpu->arch.ia32_xss, host_xss);
+                               vcpu->arch.ia32_xss, host_xss, false);
                else
                        clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
                break;
@@ -10076,7 +10082,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                        clear_atomic_switch_msr(vmx, msrs[i].msr);
                else
                        add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-                                       msrs[i].host);
+                                       msrs[i].host, false);
 }
 
 static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
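
For context, a minimal sketch (not part of this patch) of how a later change in the series might use the new entry_only argument for the IA32_FLUSH_CMD case described in the commit message. The helper name vmx_add_l1d_flush_msr is illustrative only; MSR_IA32_FLUSH_CMD and L1D_FLUSH are assumed to be the existing <asm/msr-index.h> definitions:

/*
 * Illustrative sketch: ask the CPU to load IA32_FLUSH_CMD from the
 * VM-entry MSR autoload list on every VMENTER.  The MSR is a write-only
 * command, so there is no host value to restore on VMEXIT; passing
 * entry_only=true stores the MSR only in the guest (ENTRY) array.
 */
static void vmx_add_l1d_flush_msr(struct vcpu_vmx *vmx)
{
	add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
}

Whether a host-side restore entry is needed remains the caller's decision; all existing callers touched by this patch pass false and keep the old paired VM-entry/VM-exit behaviour.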