KVM: x86: Make Hyper-V emulation optional
author    Vitaly Kuznetsov <vkuznets@redhat.com>
          Tue, 5 Dec 2023 10:36:26 +0000 (11:36 +0100)
committer Sean Christopherson <seanjc@google.com>
          Thu, 7 Dec 2023 17:34:57 +0000 (09:34 -0800)
Hyper-V emulation in KVM is a fairly big chunk of code, and in some cases
it may be desirable not to compile it in, both to reduce module size and
to shrink the attack surface. Introduce a CONFIG_KVM_HYPERV option to
make this possible.
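
For example, with this option in place a kernel can be built without
Hyper-V emulation via a workflow along these lines (illustrative, not
part of this patch):

  $ ./scripts/config --file .config -d KVM_HYPERV
  $ make olddefconfig
  $ grep KVM_HYPERV .config
  # CONFIG_KVM_HYPERV is not set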

Note: there is room for further nVMX/nSVM code optimization when
!CONFIG_KVM_HYPERV; this will be done in follow-up patches.

Also reorganize the Makefile a bit so that all CONFIG_HYPERV and
CONFIG_KVM_HYPERV files are grouped together (CONFIG_HYPERV covers KVM
itself running on top of Hyper-V, while CONFIG_KVM_HYPERV covers KVM
emulating Hyper-V for its guests).

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Jeremi Piotrowski <jpiotrowski@linux.microsoft.com>
Link: https://lore.kernel.org/r/20231205103630.1391318-13-vkuznets@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/irq_comm.c
arch/x86/kvm/svm/hyperv.h
arch/x86/kvm/vmx/hyperv.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 86069b985d227f923b5e4155fac326cbf91ee2a5..b093c2191cd3f7fdee3c9738b2da89400e68ca08 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -937,8 +937,10 @@ struct kvm_vcpu_arch {
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
+#ifdef CONFIG_KVM_HYPERV
        bool hyperv_enabled;
        struct kvm_vcpu_hv *hyperv;
+#endif
 #ifdef CONFIG_KVM_XEN
        struct kvm_vcpu_xen xen;
 #endif
@@ -1095,6 +1097,7 @@ enum hv_tsc_page_status {
        HV_TSC_PAGE_BROKEN,
 };
 
+#ifdef CONFIG_KVM_HYPERV
 /* Hyper-V emulation context */
 struct kvm_hv {
        struct mutex hv_lock;
@@ -1127,6 +1130,7 @@ struct kvm_hv {
 
        struct kvm_hv_syndbg hv_syndbg;
 };
+#endif
 
 struct msr_bitmap_range {
        u32 flags;
@@ -1349,7 +1353,9 @@ struct kvm_arch {
        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;
 
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_hv hyperv;
+#endif
 
 #ifdef CONFIG_KVM_XEN
        struct kvm_xen xen;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index c1716e83d1761282fc44543e9e9635b54292edcd..34f2f47cadf2b6687d220845aa5bb7d07a5df513 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -141,6 +141,20 @@ config KVM_SMM
 
          If unsure, say Y.
 
+config KVM_HYPERV
+       bool "Support for Microsoft Hyper-V emulation"
+       depends on KVM
+       default y
+       help
+         Provides KVM support for emulating Microsoft Hyper-V.  This allows KVM
+         to expose a subset of the paravirtualized interfaces defined in the
+         Hyper-V Hypervisor Top-Level Functional Specification (TLFS):
+         https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
+         These interfaces are required for the correct and performant functioning
+         of Windows and Hyper-V guests on KVM.
+
+         If unsure, say "Y".
+
 config KVM_XEN
        bool "Support for Xen hypercall interface"
        depends on KVM
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 8ea872401cd6752220ba110b4d9397676c2bc554..475b5fa917a62d03d1a255234833140473b9c737 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -11,29 +11,27 @@ include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y                  += x86.o emulate.o i8259.o irq.o lapic.o \
                           i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
-                          hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
+                          debugfs.o mmu/mmu.o mmu/page_track.o \
                           mmu/spte.o
 
-ifdef CONFIG_HYPERV
-kvm-y                  += kvm_onhyperv.o
-endif
-
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
+kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
 kvm-$(CONFIG_KVM_XEN)  += xen.o
 kvm-$(CONFIG_KVM_SMM)  += smm.o
 
 kvm-intel-y            += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
-                          vmx/hyperv.o vmx/hyperv_evmcs.o vmx/nested.o vmx/posted_intr.o
-kvm-intel-$(CONFIG_X86_SGX_KVM)        += vmx/sgx.o
+                          vmx/nested.o vmx/posted_intr.o
 
-ifdef CONFIG_HYPERV
-kvm-intel-y            += vmx/vmx_onhyperv.o
-endif
+kvm-intel-$(CONFIG_X86_SGX_KVM)        += vmx/sgx.o
+kvm-intel-$(CONFIG_KVM_HYPERV) += vmx/hyperv.o vmx/hyperv_evmcs.o
 
 kvm-amd-y              += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
-                          svm/sev.o svm/hyperv.o
+                          svm/sev.o
+kvm-amd-$(CONFIG_KVM_HYPERV) += svm/hyperv.o
 
 ifdef CONFIG_HYPERV
+kvm-y                  += kvm_onhyperv.o
+kvm-intel-y            += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o
 kvm-amd-y              += svm/svm_onhyperv.o
 endif
 
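The kvm-$(CONFIG_KVM_HYPERV) lines rely on the usual kbuild idiom: a
bool config symbol expands to "y" when enabled and to nothing when
disabled, so disabled objects land in a throwaway "kvm-" list that
kbuild never links. A minimal sketch of the mechanism (illustrative
only, not part of the patch):

  # CONFIG_KVM_HYPERV=y     ->  kvm-y += hyperv.o   (linked into kvm.ko)
  # CONFIG_KVM_HYPERV unset ->  kvm-  += hyperv.o   (list is ignored)
  kvm-$(CONFIG_KVM_HYPERV) += hyperv.o
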
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index dda6fc4cfae886e9d1b1c6c458e87f2f2b67447a..1b278a3f0689506c0c37c5fdf3d23e68a53f9942 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -314,11 +314,15 @@ EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 
 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_cpuid_entry2 *entry;
 
        entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
                                  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
+#else
+       return false;
+#endif
 }
 
 static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
@@ -433,11 +437,13 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                return 0;
        }
 
+#ifdef CONFIG_KVM_HYPERV
        if (kvm_cpuid_has_hyperv(e2, nent)) {
                r = kvm_hv_vcpu_init(vcpu);
                if (r)
                        return r;
        }
+#endif
 
        r = kvm_check_cpuid(vcpu, e2, nent);
        if (r)
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 77f6549aa5de5f94633c80c0c1dec39381316df8..1dc0b6604526a1c629b3709d54c674f1ae1ce080 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -24,6 +24,8 @@
 #include <linux/kvm_host.h>
 #include "x86.h"
 
+#ifdef CONFIG_KVM_HYPERV
+
 /* "Hv#1" signature */
 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
 
@@ -261,5 +263,62 @@ static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu,
 }
 
 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
+                                        struct pvclock_vcpu_time_info *hv_clock) {}
+static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
+static inline void kvm_hv_init_vm(struct kvm *kvm) {}
+static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
+static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+static inline void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+       return HV_STATUS_ACCESS_DENIED;
+}
+static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
+static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
+static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
+{
+       return false;
+}
+static inline bool kvm_hv_synic_auto_eoi_set(struct kvm_vcpu *vcpu, int vector)
+{
+       return false;
+}
+static inline void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) {}
+static inline bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) {}
+static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
+{
+       return vcpu->vcpu_idx;
+}
+static inline void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled) {}
+#endif /* CONFIG_KVM_HYPERV */
 
-#endif
+#endif /* __ARCH_X86_KVM_HYPERV_H__ */
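
With the !CONFIG_KVM_HYPERV stubs above, call sites need no #ifdefs of
their own: since kvm_hv_hypercall_enabled() becomes a static inline that
returns false, the compiler discards the whole Hyper-V branch. A
hypothetical caller, for illustration only:

  static int example_handle_hypercall(struct kvm_vcpu *vcpu)
  {
          /* Constant-false when CONFIG_KVM_HYPERV=n; branch is elided. */
          if (kvm_hv_hypercall_enabled(vcpu))
                  return kvm_hv_hypercall(vcpu);
          return 0;
  }
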
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 16d076a1b91acc65aed41eed3536bad1744a8a1f..68f3f6c26046936601cb0e0a9383c8946ccad8ce 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -144,7 +144,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
 }
 
-
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
                    struct kvm *kvm, int irq_source_id, int level,
                    bool line_status)
@@ -154,6 +154,7 @@ static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
 
        return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
 }
+#endif
 
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
@@ -163,9 +164,11 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
        int r;
 
        switch (e->type) {
+#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                return kvm_hv_set_sint(e, kvm, irq_source_id, level,
                                       line_status);
+#endif
 
        case KVM_IRQ_ROUTING_MSI:
                if (kvm_msi_route_invalid(kvm, e))
@@ -314,11 +317,13 @@ int kvm_set_routing_entry(struct kvm *kvm,
                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                e->set = kvm_hv_set_sint;
                e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
                e->hv_sint.sint = ue->u.hv_sint.sint;
                break;
+#endif
 #ifdef CONFIG_KVM_XEN
        case KVM_IRQ_ROUTING_XEN_EVTCHN:
                return kvm_xen_setup_evtchn(kvm, e, ue);
@@ -438,5 +443,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
 
 void kvm_arch_irq_routing_update(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_HYPERV
        kvm_hv_irq_routing_update(kvm);
+#endif
 }
diff --git a/arch/x86/kvm/svm/hyperv.h b/arch/x86/kvm/svm/hyperv.h
index 02f4784b5d446b2f0a86604b589c85981b13e5d1..d3f8bfc05832ee0a2249cbaeed22f081027a9a2e 100644
--- a/arch/x86/kvm/svm/hyperv.h
+++ b/arch/x86/kvm/svm/hyperv.h
@@ -11,6 +11,7 @@
 #include "../hyperv.h"
 #include "svm.h"
 
+#ifdef CONFIG_KVM_HYPERV
 static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -41,5 +42,13 @@ static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
 }
 
 void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else /* CONFIG_KVM_HYPERV */
+static inline void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu) {}
+static inline bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+static inline void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_KVM_HYPERV */
 
 #endif /* __ARCH_X86_KVM_SVM_HYPERV_H__ */
diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h
index 6e1ee951e360641260de8787eac8a6e305041b7a..0e90ef4efe341533ab67b2769390d5a9455acace 100644
--- a/arch/x86/kvm/vmx/hyperv.h
+++ b/arch/x86/kvm/vmx/hyperv.h
@@ -9,11 +9,6 @@
 #define EVMPTR_INVALID (-1ULL)
 #define EVMPTR_MAP_PENDING (-2ULL)
 
-static inline bool evmptr_is_valid(u64 evmptr)
-{
-       return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
-}
-
 enum nested_evmptrld_status {
        EVMPTRLD_DISABLED,
        EVMPTRLD_SUCCEEDED,
@@ -21,6 +16,12 @@ enum nested_evmptrld_status {
        EVMPTRLD_ERROR,
 };
 
+#ifdef CONFIG_KVM_HYPERV
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+       return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+}
+
 static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
 {
        /*
@@ -39,5 +40,11 @@ void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *
 int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
 bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
 void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
+#else
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+       return false;
+}
+#endif
 
 #endif /* __KVM_X86_VMX_HYPERV_H */
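
Keeping evmptr_is_valid() available but hard-wired to false means the
eVMCS guards sprinkled through vmx/nested.c turn into constant-false
conditions whose bodies become dead code, e.g. a guard of this shape
(illustrative fragment):

  /* With the stub above this is if (false); the eVMCS path is
   * compiled out when CONFIG_KVM_HYPERV=n. */
  if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
          copy_enlightened_to_vmcs12(vmx, hv_clean_fields);
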
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 903b6f9ea2bdf9fadc2c23fc194bcce8116bbcb2..01a94d290c125fe5cc79373a6629b611e7b4b27f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -226,6 +226,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 
 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -241,10 +242,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
                hv_vcpu->nested.vm_id = 0;
                hv_vcpu->nested.vp_id = 0;
        }
+#endif
 }
 
 static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        /*
         * When Enlightened VMEntry is enabled on the calling CPU we treat
@@ -264,6 +267,9 @@ static bool nested_evmcs_handle_vmclear(struct kvm_vcpu *vcpu, gpa_t vmptr)
                nested_release_evmcs(vcpu);
 
        return true;
+#else
+       return false;
+#endif
 }
 
 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -1595,6 +1601,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 
 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
        struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
@@ -1835,10 +1842,14 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
         */
 
        return;
+#else /* CONFIG_KVM_HYPERV */
+       KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 
@@ -2009,6 +2020,9 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
        evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
 
        return;
+#else /* CONFIG_KVM_HYPERV */
+       KVM_BUG_ON(1, vmx->vcpu.kvm);
+#endif /* CONFIG_KVM_HYPERV */
 }
 
 /*
@@ -2018,6 +2032,7 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
        struct kvm_vcpu *vcpu, bool from_launch)
 {
+#ifdef CONFIG_KVM_HYPERV
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        bool evmcs_gpa_changed = false;
        u64 evmcs_gpa;
@@ -2099,6 +2114,9 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
        }
 
        return EVMPTRLD_SUCCEEDED;
+#else
+       return EVMPTRLD_DISABLED;
+#endif
 }
 
 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
@@ -2905,8 +2923,10 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
            nested_check_vm_entry_controls(vcpu, vmcs12))
                return -EINVAL;
 
+#ifdef CONFIG_KVM_HYPERV
        if (guest_cpuid_has_evmcs(vcpu))
                return nested_evmcs_check_controls(vmcs12);
+#endif
 
        return 0;
 }
@@ -3178,6 +3198,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3205,6 +3226,7 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 
        return true;
 }
+#endif
 
 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
@@ -3296,6 +3318,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_HYPERV
        /*
         * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
         * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
@@ -3312,6 +3335,7 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 
                return false;
        }
+#endif
 
        if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
                return false;
@@ -4749,6 +4773,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* trying to cancel vmlaunch/vmresume is a bug */
        WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
+#ifdef CONFIG_KVM_HYPERV
        if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
                /*
                 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
@@ -4758,6 +4783,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                 */
                (void)nested_get_evmcs_page(vcpu);
        }
+#endif
 
        /* Service pending TLB flush requests for L2 before switching to L1. */
        kvm_service_local_tlb_flush_requests(vcpu);
@@ -6212,11 +6238,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                 * Handle L2's bus locks in L0 directly.
                 */
                return true;
+#ifdef CONFIG_KVM_HYPERV
        case EXIT_REASON_VMCALL:
                /* Hyper-V L2 TLB flush hypercall is handled by L0 */
                return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
                        nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
                        kvm_hv_is_tlb_flush_hcall(vcpu);
+#endif
        default:
                break;
        }
@@ -7100,7 +7128,9 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .set_state = vmx_set_nested_state,
        .get_nested_state_pages = vmx_get_nested_state_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
+#ifdef CONFIG_KVM_HYPERV
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
        .hv_inject_synthetic_vmexit_post_tlb_flush = vmx_hv_inject_synthetic_vmexit_post_tlb_flush,
+#endif
 };
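
Because vmx_nested_ops has static storage duration, the callbacks left
outside the #ifdef simply stay NULL, which is exactly what the
capability checks in x86.c key off, e.g. (as in the x86.c hunk below):

  /* A NULL ->enable_evmcs reports KVM_CAP_HYPERV_ENLIGHTENED_VMCS
   * as unavailable. */
  r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
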
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 552593a2ac148bb89024be34e72a5f1c1553bd94..3ff5c44dff9d5231e4f463aeac66d916021da54d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2048,6 +2048,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
                                    &msr_info->data))
                        return 1;
+#ifdef CONFIG_KVM_HYPERV
                /*
                 * Enlightened VMCS v1 doesn't have certain VMCS fields but
                 * instead of just ignoring the features, different Hyper-V
@@ -2058,6 +2059,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
                        nested_evmcs_filter_control_msr(vcpu, msr_info->index,
                                                        &msr_info->data);
+#endif
                break;
        case MSR_IA32_RTIT_CTL:
                if (!vmx_pt_mode_is_host_guest())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 81224b9676d9920cba777a007dc10baed0285a92..598b057611e029721c05106fac7463aa4fb4ac5f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1504,6 +1504,8 @@ static unsigned num_msrs_to_save;
 static const u32 emulated_msrs_all[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
+
+#ifdef CONFIG_KVM_HYPERV
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
        HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
@@ -1521,6 +1523,7 @@ static const u32 emulated_msrs_all[] = {
        HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
        HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
        HV_X64_MSR_SYNDBG_PENDING_BUFFER,
+#endif
 
        MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
@@ -4020,6 +4023,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 * the need to ignore the workaround.
                 */
                break;
+#ifdef CONFIG_KVM_HYPERV
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
        case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
        case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4032,6 +4036,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case HV_X64_MSR_TSC_INVARIANT_CONTROL:
                return kvm_hv_set_msr_common(vcpu, msr, data,
                                             msr_info->host_initiated);
+#endif
        case MSR_IA32_BBL_CR_CTL3:
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
@@ -4377,6 +4382,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 */
                msr_info->data = 0x20000000;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
        case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
        case HV_X64_MSR_SYNDBG_OPTIONS:
@@ -4390,6 +4396,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_hv_get_msr_common(vcpu,
                                             msr_info->index, &msr_info->data,
                                             msr_info->host_initiated);
+#endif
        case MSR_IA32_BBL_CR_CTL3:
                /* This legacy MSR exists but isn't fully documented in current
                 * silicon.  It is however accessed by winxp in very narrow
@@ -4527,6 +4534,7 @@ static inline bool kvm_can_mwait_in_guest(void)
                boot_cpu_has(X86_FEATURE_ARAT);
 }
 
+#ifdef CONFIG_KVM_HYPERV
 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
                                            struct kvm_cpuid2 __user *cpuid_arg)
 {
@@ -4547,6 +4555,7 @@ static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
 
        return 0;
 }
+#endif
 
 static bool kvm_is_vm_type_supported(unsigned long type)
 {
@@ -4580,9 +4589,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_PIT_STATE2:
        case KVM_CAP_SET_IDENTITY_MAP_ADDR:
        case KVM_CAP_VCPU_EVENTS:
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV:
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
+       case KVM_CAP_HYPERV_TIME:
        case KVM_CAP_HYPERV_SYNIC:
        case KVM_CAP_HYPERV_SYNIC2:
        case KVM_CAP_HYPERV_VP_INDEX:
@@ -4592,6 +4603,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_HYPERV_CPUID:
        case KVM_CAP_HYPERV_ENFORCE_CPUID:
        case KVM_CAP_SYS_HYPERV_CPUID:
+#endif
        case KVM_CAP_PCI_SEGMENT:
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -4601,7 +4613,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_GET_TSC_KHZ:
        case KVM_CAP_KVMCLOCK_CTRL:
        case KVM_CAP_READONLY_MEM:
-       case KVM_CAP_HYPERV_TIME:
        case KVM_CAP_IOAPIC_POLARITY_IGNORED:
        case KVM_CAP_TSC_DEADLINE_TIMER:
        case KVM_CAP_DISABLE_QUIRKS:
@@ -4712,12 +4723,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = kvm_x86_ops.nested_ops->get_state ?
                        kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
                r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
                break;
        case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
                r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
                break;
+#endif
        case KVM_CAP_SMALLER_MAXPHYADDR:
                r = (int) allow_smaller_maxphyaddr;
                break;
@@ -4884,9 +4897,11 @@ long kvm_arch_dev_ioctl(struct file *filp,
        case KVM_GET_MSRS:
                r = msr_io(NULL, argp, do_get_msr_feature, 1);
                break;
+#ifdef CONFIG_KVM_HYPERV
        case KVM_GET_SUPPORTED_HV_CPUID:
                r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
                break;
+#endif
        case KVM_GET_DEVICE_ATTR: {
                struct kvm_device_attr attr;
                r = -EFAULT;
@@ -5712,14 +5727,11 @@ static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
 {
-       int r;
-       uint16_t vmcs_version;
-       void __user *user_ptr;
-
        if (cap->flags)
                return -EINVAL;
 
        switch (cap->cap) {
+#ifdef CONFIG_KVM_HYPERV
        case KVM_CAP_HYPERV_SYNIC2:
                if (cap->args[0])
                        return -EINVAL;
@@ -5731,16 +5743,22 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                return kvm_hv_activate_synic(vcpu, cap->cap ==
                                             KVM_CAP_HYPERV_SYNIC2);
        case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
-               if (!kvm_x86_ops.nested_ops->enable_evmcs)
-                       return -ENOTTY;
-               r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
-               if (!r) {
-                       user_ptr = (void __user *)(uintptr_t)cap->args[0];
-                       if (copy_to_user(user_ptr, &vmcs_version,
-                                        sizeof(vmcs_version)))
-                               r = -EFAULT;
+               {
+                       int r;
+                       uint16_t vmcs_version;
+                       void __user *user_ptr;
+
+                       if (!kvm_x86_ops.nested_ops->enable_evmcs)
+                               return -ENOTTY;
+                       r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
+                       if (!r) {
+                               user_ptr = (void __user *)(uintptr_t)cap->args[0];
+                               if (copy_to_user(user_ptr, &vmcs_version,
+                                                sizeof(vmcs_version)))
+                                       r = -EFAULT;
+                       }
+                       return r;
                }
-               return r;
        case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
                if (!kvm_x86_ops.enable_l2_tlb_flush)
                        return -ENOTTY;
@@ -5749,6 +5767,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
        case KVM_CAP_HYPERV_ENFORCE_CPUID:
                return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+#endif
 
        case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
                vcpu->arch.pv_cpuid.enforce = cap->args[0];
@@ -6141,9 +6160,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }
+#ifdef CONFIG_KVM_HYPERV
        case KVM_GET_SUPPORTED_HV_CPUID:
                r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
                break;
+#endif
 #ifdef CONFIG_KVM_XEN
        case KVM_XEN_VCPU_GET_ATTR: {
                struct kvm_xen_vcpu_attr xva;
@@ -7201,6 +7222,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
                r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
                break;
        }
+#ifdef CONFIG_KVM_HYPERV
        case KVM_HYPERV_EVENTFD: {
                struct kvm_hyperv_eventfd hvevfd;
 
@@ -7210,6 +7232,7 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
                r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
                break;
        }
+#endif
        case KVM_SET_PMU_EVENT_FILTER:
                r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
                break;
@@ -10588,19 +10611,20 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 {
-       u64 eoi_exit_bitmap[4];
-
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
+#ifdef CONFIG_KVM_HYPERV
        if (to_hv_vcpu(vcpu)) {
+               u64 eoi_exit_bitmap[4];
+
                bitmap_or((ulong *)eoi_exit_bitmap,
                          vcpu->arch.ioapic_handled_vectors,
                          to_hv_synic(vcpu)->vec_bitmap, 256);
                static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
                return;
        }
-
+#endif
        static_call_cond(kvm_x86_load_eoi_exitmap)(
                vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
@@ -10691,9 +10715,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 * the flushes are considered "remote" and not "local" because
                 * the requests can be initiated from other vCPUs.
                 */
+#ifdef CONFIG_KVM_HYPERV
                if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
                    kvm_hv_vcpu_flush_tlb(vcpu))
                        kvm_vcpu_flush_tlb_guest(vcpu);
+#endif
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -10746,6 +10772,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        vcpu_load_eoi_exitmap(vcpu);
                if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
                        kvm_vcpu_reload_apic_access_page(vcpu);
+#ifdef CONFIG_KVM_HYPERV
                if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
                        vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
@@ -10776,6 +10803,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 */
                if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
                        kvm_hv_process_stimers(vcpu);
+#endif
                if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
                        kvm_vcpu_update_apicv(vcpu);
                if (kvm_check_request(KVM_REQ_APF_READY, vcpu))