KVM: selftests: hyperv_svm_test: Introduce L2 TLB flush test
author    Vitaly Kuznetsov <vkuznets@redhat.com>
          Tue, 1 Nov 2022 14:54:25 +0000 (15:54 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 21 Nov 2022 11:42:43 +0000 (06:42 -0500)
Enable Hyper-V L2 TLB flush and check that Hyper-V TLB flush hypercalls
from L2 don't exit to L1 unless 'TlbLockCount' is set in the Partition
assist page.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-48-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86_64/svm.h
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c

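The 'TlbLockCount' mentioned in the commit message is the first 32-bit word of the Partition assist page, which is exactly what the test pokes via *(u32 *)(hv_pages->partition_assist) in the hunks below: while the count is zero, L0 handles the L2 flush hypercall itself and L1 only sees the following RDMSR exit; once the count is non-zero, L0 still performs the flush but then delivers the synthetic HV_SVM_EXITCODE_ENL exit to L1. As a rough, standalone sketch of the condition the test asserts (the helper name is invented here; the two constants are copied from the svm.h hunk below):

#include <stdint.h>
#include <stdbool.h>

/* Synthetic VM-Exit values, as added to svm.h by this patch. */
#define HV_SVM_EXITCODE_ENL			0xf0000000
#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH	(1)

/*
 * Hypothetical helper mirroring what guest_code() asserts after run_guest():
 * an L2 TLB flush hypercall only surfaces in L1 as the synthetic exit when
 * TlbLockCount (the first u32 of the Partition assist page) is non-zero.
 */
static bool l2_flush_trapped_to_l1(uint64_t exit_code, uint64_t exit_info_1,
				   const uint32_t *partition_assist)
{
	if (*partition_assist == 0)
		return false;	/* L0 handled the flush, no exit to L1 */

	return exit_code == HV_SVM_EXITCODE_ENL &&
	       exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
}

In the actual test these values come from vmcb->control.exit_code and vmcb->control.exit_info_1, checked by the GUEST_ASSERT()s added near the end of guest_code() below.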
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
index 483e6ae12f69ee81cca2e722c445790858108f1b..4803e1056055879bb659bd55d5bbcbebd6859f0c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
@@ -76,6 +76,10 @@ struct hv_vmcb_enlightenments {
  */
 #define HV_VMCB_NESTED_ENLIGHTENMENTS (1U << 31)
 
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL                    0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH   (1)
+
 struct __attribute__ ((__packed__)) vmcb_control_area {
        u32 intercept_cr;
        u32 intercept_dr;
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index 3c9a2a1b4cfd8e871854b253369ec515f21ffbdf..3b3cc94ba8e47bfb28b4f924968944431ffe2412 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -34,6 +34,8 @@ static inline void rdmsr_from_l2(uint32_t msr)
 
 void l2_guest_code(void)
 {
+       u64 unused;
+
        GUEST_SYNC(3);
        /* Exit to L1 */
        vmmcall();
@@ -47,11 +49,28 @@ void l2_guest_code(void)
 
        GUEST_SYNC(5);
 
+       /* L2 TLB flush tests */
+       hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+                        HV_HYPERCALL_FAST_BIT, 0x0,
+                        HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+                        HV_FLUSH_ALL_PROCESSORS);
+       rdmsr_from_l2(MSR_FS_BASE);
+       /*
+        * Note: hypercall status (RAX) is not preserved correctly by L1 after
+        * synthetic vmexit, use unchecked version.
+        */
+       __hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+                          HV_HYPERCALL_FAST_BIT, 0x0,
+                          HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+                          HV_FLUSH_ALL_PROCESSORS, &unused);
+
        /* Done, exit to L1 and never come back.  */
        vmmcall();
 }
 
-static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
+static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
+                                                   struct hyperv_test_pages *hv_pages,
+                                                   vm_vaddr_t pgs_gpa)
 {
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        struct vmcb *vmcb = svm->vmcb;
@@ -59,13 +78,23 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
 
        GUEST_SYNC(1);
 
-       wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
+       wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+       wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
+       enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
 
        GUEST_ASSERT(svm->vmcb_gpa);
        /* Prepare for L2 execution. */
        generic_svm_setup(svm, l2_guest_code,
                          &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
+       /* L2 TLB flush setup */
+       hve->partition_assist_page = hv_pages->partition_assist_gpa;
+       hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+       hve->hv_vm_id = 1;
+       hve->hv_vp_id = 1;
+       current_vp_assist->nested_control.features.directhypercall = 1;
+       *(u32 *)(hv_pages->partition_assist) = 0;
+
        GUEST_SYNC(2);
        run_guest(vmcb, svm->vmcb_gpa);
        GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
@@ -100,6 +129,20 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
        GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
        vmcb->save.rip += 2; /* rdmsr */
 
+
+       /*
+        * L2 TLB flush test. First VMCALL should be handled directly by L0,
+        * no VMCALL exit expected.
+        */
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
+       vmcb->save.rip += 2; /* rdmsr */
+       /* Enable synthetic vmexit */
+       *(u32 *)(hv_pages->partition_assist) = 1;
+       run_guest(vmcb, svm->vmcb_gpa);
+       GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
+       GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);
+
        run_guest(vmcb, svm->vmcb_gpa);
        GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
        GUEST_SYNC(6);
@@ -109,8 +152,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
 
 int main(int argc, char *argv[])
 {
-       vm_vaddr_t nested_gva = 0;
-
+       vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
+       vm_vaddr_t hcall_page;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct kvm_run *run;
@@ -124,7 +167,13 @@ int main(int argc, char *argv[])
        vcpu_set_hv_cpuid(vcpu);
        run = vcpu->run;
        vcpu_alloc_svm(vm, &nested_gva);
-       vcpu_args_set(vcpu, 1, nested_gva);
+       vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
+
+       hcall_page = vm_vaddr_alloc_pages(vm, 1);
+       memset(addr_gva2hva(vm, hcall_page), 0x0,  getpagesize());
+
+       vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+       vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
 
        for (stage = 1;; stage++) {
                vcpu_run(vcpu);