KVM: selftests: Stuff RAX/RCX with 'safe' values in vmmcall()/vmcall()
author    Vitaly Kuznetsov <vkuznets@redhat.com>
          Tue, 1 Nov 2022 14:54:22 +0000 (15:54 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 21 Nov 2022 11:42:38 +0000 (06:42 -0500)
vmmcall()/vmcall() are used to exit from L2 to L1 and no concrete hypercall
ABI is currently followed. With the introduction of Hyper-V L2 TLB flush
it becomes (theoretically) possible that L0 will take responsibility for
handling the call and no L1 exit will happen. Prevent this by stuffing RAX
(KVM ABI) and RCX (Hyper-V ABI) with 'safe' values.

While at it, convert vmmcall() to 'static inline', make it set up a stack
frame, and move it to include/x86_64/svm_util.h.
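For illustration only (not part of the patch): under the Hyper-V TLFS the
hypercall call code lives in the low 16 bits of the input value passed in
RCX, while KVM takes its hypercall number from RAX, so 0xbeefdead and
0xdeadbeef match neither ABI. Below is a minimal standalone sketch of the
Hyper-V side, assuming the TLFS call-code layout and the flush call codes
from the kernel's include/asm-generic/hyperv-tlfs.h; the helper name is
hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Call codes from hyperv-tlfs.h; per the Hyper-V TLFS, bits 15:0 of the
 * hypercall input value (in RCX on x86_64) are the call code.
 */
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE	0x0002
#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST	0x0003

/* Hypothetical helper: would L0 consume this as an L2 TLB flush? */
static bool l0_would_consume_tlb_flush(uint64_t rcx)
{
	uint16_t call_code = rcx & 0xffff;

	return call_code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	       call_code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST;
}

int main(void)
{
	/* 0xbeefdead & 0xffff == 0xdead, which matches no flush call code. */
	printf("L0 consumes RCX=0xbeefdead: %s\n",
	       l0_would_consume_tlb_flush(0xbeefdead) ? "yes" : "no");
	return 0;
}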

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-45-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/include/x86_64/svm_util.h
tools/testing/selftests/kvm/include/x86_64/vmx.h

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index a10f39affa45e498357b126f5fc9cbc7bca1346e..5d310abe6c3f4671b56c4cfbb8454ca84c16243e 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -677,11 +677,6 @@ static inline void cpu_relax(void)
        asm volatile("rep; nop" ::: "memory");
 }
 
-#define vmmcall()              \
-       __asm__ __volatile__(   \
-               "vmmcall\n"     \
-               )
-
 #define ud2()                  \
        __asm__ __volatile__(   \
                "ud2\n" \
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index 7aee6244ab6ac9e39b9343643fed683a44740179..044f0f872ba9d581350d6aaf14210f02a437dee8 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -32,6 +32,20 @@ struct svm_test_data {
        uint64_t msr_gpa;
 };
 
+static inline void vmmcall(void)
+{
+       /*
+        * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
+        * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
+        * use of this function is to exit to L1 from L2.  Clobber all other
+        * GPRs as L1 doesn't correctly preserve them during vmexits.
+        */
+       __asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
+                            : : "a"(0xdeadbeef), "c"(0xbeefdead)
+                            : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+                              "r10", "r11", "r12", "r13", "r14", "r15");
+}
+
 #define stgi()                 \
        __asm__ __volatile__(   \
                "stgi\n"        \
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index ef784bd6dfc2d7378c8d331e8a46ae3131dd30ca..5f0c0a29c556ea14bf0e098417cc6d69e4362bb7 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -437,11 +437,16 @@ static inline int vmresume(void)
 
 static inline void vmcall(void)
 {
-       /* Currently, L1 destroys our GPRs during vmexits.  */
-       __asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
-                            "rax", "rbx", "rcx", "rdx",
-                            "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
-                            "r13", "r14", "r15");
+       /*
+        * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
+        * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
+        * use of this function is to exit to L1 from L2.  Clobber all other
+        * GPRs as L1 doesn't correctly preserve them during vmexits.
+        */
+       __asm__ __volatile__("push %%rbp; vmcall; pop %%rbp"
+                            : : "a"(0xdeadbeef), "c"(0xbeefdead)
+                            : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+                              "r10", "r11", "r12", "r13", "r14", "r15");
 }
 
 static inline int vmread(uint64_t encoding, uint64_t *value)
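A matching hedged sketch for the VMX side, modeled on existing nested
selftests. The function names are illustrative; vmlaunch(), vmreadz() and
EXIT_REASON_VMCALL come from the selftests' vmx.h, and GUEST_ASSERT is
assumed to come from the usual selftest harness headers.

#include "kvm_util.h"
#include "vmx.h"

/* Hypothetical L2 guest body. */
static void l2_guest_code(void)
{
	/* Exit to L1; RAX/RCX carry values no hypercall ABI recognizes. */
	vmcall();
}

/* Illustrative L1 side: launch L2 and verify the exit reached L1. */
static void l1_check_vmcall_exit(void)
{
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
}

Note the push %%rbp/pop %%rbp bracketing in both helpers: RBP cannot be
listed as a clobber when the compiler uses it as the frame pointer, so it
is preserved by hand, while every other GPR is declared clobbered because
L1 doesn't correctly preserve them across the induced exit.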