x86: kvm: use alternatives for VMCALL vs. VMMCALL if kernel text is read-only
author Paolo Bonzini <pbonzini@redhat.com>
Wed, 26 Nov 2014 20:56:31 +0000 (14:56 -0600)
committer Jiri Slaby <jslaby@suse.cz>
Wed, 3 Dec 2014 11:07:27 +0000 (12:07 +0100)
commit c1118b3602c2329671ad5ec8bdf8e374323d6343 upstream.

On x86_64, kernel text mappings are mapped read-only with CONFIG_DEBUG_RODATA.
In that case, KVM will fail to patch VMCALL instructions to VMMCALL
as required on AMD processors.

The failure mode is currently a divide-by-zero exception, which obviously
is a KVM bug that has to be fixed.  However, picking the right instruction
between VMCALL and VMMCALL will be faster and will help if you cannot upgrade
the hypervisor.
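
For reference, the two encodings involved are vmcall (0f 01 c1, Intel VT) and vmmcall (0f 01 d9, AMD SVM). Below is a minimal userspace sketch, not part of the patch, that approximates the kernel's choice by CPUID vendor string; the patch itself keys the ALTERNATIVE on X86_FEATURE_VMMCALL, which is set for all AMD CPUs in amd.c further down.

/*
 * Illustration only, not from the patch: approximate the kernel's choice of
 * hypercall encoding by looking at the CPUID vendor string.  The kernel keys
 * the ALTERNATIVE on X86_FEATURE_VMMCALL, which early_init_amd() sets
 * unconditionally on AMD parts; checking for "AuthenticAMD" here is a rough
 * userspace stand-in for that feature bit.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX. */
	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	if (!strcmp(vendor, "AuthenticAMD"))
		printf("%s: guest would use vmmcall (0f 01 d9)\n", vendor);
	else
		printf("%s: guest would use vmcall (0f 01 c1)\n", vendor);
	return 0;
}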

Reported-by: Chris Webb <chris@arachsys.com>
Tested-by: Chris Webb <chris@arachsys.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Chris J Arges <chris.j.arges@canonical.com>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm_para.h
arch/x86/kernel/cpu/amd.c

index 89270b4318db8b97ac467717b38863fa549435ad..c2f19a83498da257b35f854ab9faa63437a5178a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
+#define X86_FEATURE_VMMCALL    (8*32+15) /* Prefer vmmcall to vmcall */
 
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
index c7678e43465bd64a3750bb912b9abc7984ac54cd..e62cf897f7819bc9795ab096a15689b3543e07b2 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_KVM_PARA_H
 
 #include <asm/processor.h>
+#include <asm/alternative.h>
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void)
 }
 #endif /* CONFIG_KVM_GUEST */
 
-/* This instruction is vmcall.  On non-VT architectures, it will generate a
- * trap that we will then rewrite to the appropriate instruction.
+#ifdef CONFIG_DEBUG_RODATA
+#define KVM_HYPERCALL \
+        ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
+#else
+/* On AMD processors, vmcall will generate a trap that we will
+ * then rewrite to the appropriate instruction.
  */
 #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+#endif
 
 /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
  * instruction.  The hypervisor may replace it with something else but only the
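
For context, the KVM_HYPERCALL bytes are only ever emitted through the inline-asm wrappers further down in this same header. An abbreviated sketch along the lines of those existing helpers (reproduced from memory, so treat the details as approximate) shows why patching the opcode alone is enough: the register protocol is identical for vmcall and vmmcall, only the three opcode bytes differ.

/* Abbreviated sketch of the kvm_para.h wrappers that consume KVM_HYPERCALL;
 * the hypercall number goes in RAX and the return value comes back in RAX,
 * regardless of whether the patched-in bytes decode as vmcall or vmmcall. */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}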
index 28233b9e45cc1ff0dc7e5ab5c8228572e56b9bda..ee51e67df1b1803c8a5f622bccd1e1d871fc03cd 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -509,6 +509,13 @@ static void early_init_amd(struct cpuinfo_x86 *c)
        }
 #endif
 
+       /*
+        * This is only needed to tell the kernel whether to use VMCALL
+        * and VMMCALL.  VMMCALL is never executed except under virt, so
+        * we can set it unconditionally.
+        */
+       set_cpu_cap(c, X86_FEATURE_VMMCALL);
+
        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf) {
                u64 val;
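
The feature bit set above is what the ALTERNATIVE in kvm_para.h keys on. A purely hypothetical variant, shown here only for illustration and not part of the patch, would branch on the bit at every hypercall site; boot-time patching via apply_alternatives() gives the same effect without the extra test, and it still works under CONFIG_DEBUG_RODATA because alternatives are applied before the kernel text is marked read-only.

/*
 * Hypothetical runtime-branch variant, for illustration only (the patch does
 * not do this): pick the opcode with a test on X86_FEATURE_VMMCALL at every
 * call site instead of letting apply_alternatives() rewrite the bytes once
 * during early boot, before the text becomes read-only.
 */
static inline long kvm_hypercall0_branch(unsigned int nr)
{
	long ret;

	if (boot_cpu_has(X86_FEATURE_VMMCALL))
		asm volatile(".byte 0x0f,0x01,0xd9"	/* vmmcall */
			     : "=a"(ret) : "a"(nr) : "memory");
	else
		asm volatile(".byte 0x0f,0x01,0xc1"	/* vmcall */
			     : "=a"(ret) : "a"(nr) : "memory");
	return ret;
}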