KVM: selftests: Introduce rdmsr_from_l2() and use it for MSR-Bitmap tests
author    Vitaly Kuznetsov <vkuznets@redhat.com>
          Tue, 1 Nov 2022 14:54:23 +0000 (15:54 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 21 Nov 2022 11:42:39 +0000 (06:42 -0500)
Hyper-V MSR-Bitmap tests do RDMSR from L2 to force an exit to L1. While
'evmcs_test' correctly clobbers all GPRs (which L1 does not preserve
across the exit), 'hyperv_svm_test' does not. Introduce a more generic
rdmsr_from_l2() to avoid code duplication and to stop hardcoding MSR
values in the inline asm. Do not put the helper in common code: the
blanket GPR clobbering works around a selftests shortcoming, not a
processor feature that requires it.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-46-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
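
Why the clobber list must name (almost) every GPR: a conventional
rdmsr() helper, like the one these selftests provide in processor.h,
only tells the compiler about the registers the RDMSR instruction
itself uses, so live values may be kept in rbx, rsi, r12, etc. across
the instruction. When the read is intercepted and L1 does not restore
L2's GPRs before resuming, those values are silently destroyed. A
minimal sketch of such a helper (illustrative only -- 'rdmsr_sketch'
is not the selftests' actual implementation, though the constraints
reflect how RDMSR works architecturally):

    /*
     * RDMSR takes the MSR index in ECX and returns the value in
     * EDX:EAX, so only "a", "c" and "d" are visible to the compiler;
     * every other GPR is assumed to survive the instruction.
     */
    static inline uint64_t rdmsr_sketch(uint32_t msr)
    {
            uint32_t lo, hi;

            __asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));

            return lo | ((uint64_t)hi << 32);
    }

rdmsr_from_l2() below avoids the problem by declaring every GPR the
compiler might use for live values as clobbered, forcing it to spill
and reload anything it needs around the exit.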

diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 74f076ba574b3215166ab22329c1481012aaac2c..58fa98512c24898097d6adb90131a46b771c74af 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -30,22 +30,15 @@ static void guest_nmi_handler(struct ex_regs *regs)
 {
 }
 
-/* Exits to L1 destroy GRPs! */
-static inline void rdmsr_fs_base(void)
+static inline void rdmsr_from_l2(uint32_t msr)
 {
-       __asm__ __volatile__ ("mov $0xc0000100, %%rcx; rdmsr" : : :
-                             "rax", "rbx", "rcx", "rdx",
-                             "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
-                             "r13", "r14", "r15");
-}
-static inline void rdmsr_gs_base(void)
-{
-       __asm__ __volatile__ ("mov $0xc0000101, %%rcx; rdmsr" : : :
-                             "rax", "rbx", "rcx", "rdx",
-                             "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
-                             "r13", "r14", "r15");
+       /* Currently, L1 doesn't preserve GPRs during vmexits. */
+       __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+                             "rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+                             "r10", "r11", "r12", "r13", "r14", "r15");
 }
 
+/* Exit to L1 from L2 with RDMSR instruction */
 void l2_guest_code(void)
 {
        GUEST_SYNC(7);
@@ -58,11 +51,11 @@ void l2_guest_code(void)
        vmcall();
 
        /* MSR-Bitmap tests */
-       rdmsr_fs_base(); /* intercepted */
-       rdmsr_fs_base(); /* intercepted */
-       rdmsr_gs_base(); /* not intercepted */
+       rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+       rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+       rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
        vmcall();
-       rdmsr_gs_base(); /* intercepted */
+       rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
 
        /* Done, exit to L1 and never come back.  */
        vmcall();
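
For reference, the magic constants hardcoded by the old evmcs_test
helpers are the architectural MSR indices that the rewrite spells by
name; per the kernel's asm/msr-index.h:

    #define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
    #define MSR_GS_BASE 0xc0000101 /* 64bit GS base */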
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index 1c3fc38b4f15133d73b8e8594207c555054e3d62..3c9a2a1b4cfd8e871854b253369ec515f21ffbdf 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
 
 #define L2_GUEST_STACK_SIZE 256
 
+/* Exit to L1 from L2 with RDMSR instruction */
+static inline void rdmsr_from_l2(uint32_t msr)
+{
+       /* Currently, L1 doesn't preserve GPRs during vmexits. */
+       __asm__ __volatile__ ("rdmsr" : : "c"(msr) :
+                             "rax", "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+                             "r10", "r11", "r12", "r13", "r14", "r15");
+}
+
 void l2_guest_code(void)
 {
        GUEST_SYNC(3);
@@ -30,11 +39,11 @@ void l2_guest_code(void)
        vmmcall();
 
        /* MSR-Bitmap tests */
-       rdmsr(MSR_FS_BASE); /* intercepted */
-       rdmsr(MSR_FS_BASE); /* intercepted */
-       rdmsr(MSR_GS_BASE); /* not intercepted */
+       rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+       rdmsr_from_l2(MSR_FS_BASE); /* intercepted */
+       rdmsr_from_l2(MSR_GS_BASE); /* not intercepted */
        vmmcall();
-       rdmsr(MSR_GS_BASE); /* intercepted */
+       rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
 
        GUEST_SYNC(5);