x86/bugs, KVM: Support the combination of guest and host IBRS
author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
           Thu, 26 Apr 2018 02:04:19 +0000 (22:04 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 17 Dec 2018 20:55:14 +0000 (21:55 +0100)
commit 5cf687548705412da47c9cec342fd952d71ed3d5 upstream.

A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, the value the host needs
is zero.

But the 336996-Speculative-Execution-Side-Channel-Mitigations.pdf document
refers to the other bits as reserved, so the kernel should respect the
boot-time SPEC_CTRL value and use that.

This makes it possible to deal with future extensions to the SPEC_CTRL
interface, if there are any at all.
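
For reference, a minimal sketch of what these helpers can look like,
assuming a variable x86_spec_ctrl_base that caches the boot-time SPEC_CTRL
value (the helper bodies live in arch/x86/kernel/cpu/bugs.c and are not
part of the diff below):

	void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
	{
		if (!boot_cpu_has(X86_FEATURE_IBRS))
			return;

		/* Write the MSR only when the guest value differs
		 * from the host's boot-time value. */
		if (x86_spec_ctrl_base != guest_spec_ctrl)
			wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
	}

	void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
	{
		if (!boot_cpu_has(X86_FEATURE_IBRS))
			return;

		/* Restore the boot-time value rather than assuming
		 * the host value is zero. */
		if (x86_spec_ctrl_base != guest_spec_ctrl)
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}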

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference, as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.
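
To illustrate that point (hypothetical snippet, not code from the patch):
on a paravirt kernel, wrmsrl() compiles to an indirect call through the
paravirt ops table, and on bare metal that call site is patched at boot to
the native WRMSR sequence, so both spellings end up executing the same
instruction:

	/* After paravirt patching on bare metal, both of these
	 * execute an identical WRMSR. */
	wrmsrl(MSR_IA32_SPEC_CTRL, val);	/* paravirt-aware wrapper */
	native_wrmsrl(MSR_IA32_SPEC_CTRL, val);	/* direct WRMSR instruction */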

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
[bwh: Backported to 4.4: This was partly applied before; apply just the
 missing bits]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e1f20e0d62c294171f17b9fd7e729edffe509439..f863035927684cec6cacc813d0e4f50872485f5e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3904,8 +3904,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -4017,8 +4016,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f7b5c009859e1aa2715ceb2ef77be484eed5eb6b..0fffd247037bad3400b98384a83987b33b43c964 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8658,8 +8658,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
@@ -8797,8 +8796,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
                vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(vmx->spec_ctrl);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();