xen: Revert commits da72ff5bfcb0 and 72a9b186292d
author Boris Ostrovsky <boris.ostrovsky@oracle.com>
Mon, 24 Apr 2017 19:04:53 +0000 (15:04 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 14 May 2017 12:08:30 +0000 (14:08 +0200)
commit 84d582d236dc1f9085e741affc72e9ba061a67c2 upstream.

Recent discussion (http://marc.info/?l=xen-devel&m=149192184523741)
established that commit 72a9b186292d ("xen: Remove event channel
notification through Xen PCI platform device") (and thus commit
da72ff5bfcb0 ("partially revert "xen: Remove event channel
notification through Xen PCI platform device"")) are unnecessary and,
in fact, prevent HVM guests from booting on Xen releases prior to 4.0.

Therefore we revert both of those commits.

The summary of that discussion is below:

  Here is the brief summary of the current situation:

  Before the offending commit (72a9b186292d):

  1) INTx does not work because of the reset_watches path.
  2) The reset_watches path is only taken if you have Xen > 4.0.
  3) The Linux kernel will by default use vector injection if the
     hypervisor supports it. So even though INTx does not work, nobody
     running the kernel with Xen > 4.0 would notice, unless they
     explicitly disabled this feature either in the kernel or in Xen
     (and it can only be disabled by modifying the code; there is no
     user-supported way to do it).

  After the offending commit (+ partial revert):

  1) INTx is no longer supported for HVM guests (only for PV guests).
  2) The kernel will not boot as an HVM guest on Xen < 4.0, which does
     not have vector injection support, since the only other supported
     mode is INTx.

  So based on this summary, I think that before commit 72a9b186292d we
  were in a much better position from a user's point of view.

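For reference, the behaviour this revert restores boils down to a simple
decision: prefer the HVM callback vector when the hypervisor advertises it,
and otherwise fall back to the platform PCI device's INTx line for event
delivery. The standalone C model below is only a sketch of the ordering of
xen_callback_vector() and platform_pci_probe() as seen in the diff; the flag
names follow the patch, but the helpers, stand-in booleans and the program
itself are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for what the kernel actually queries:
 * xen_feature(XENFEAT_hvm_callback_vector) and the presence of the
 * xen-platform-pci device.  Both are assumptions for this sketch.
 */
static bool hvm_callback_vector_supported;	/* true on Xen >= 4.0 */
static bool platform_pci_present = true;

static int xen_have_vector_callback;		/* flag restored by the revert */

/* Rough model of the xen_callback_vector() / platform_pci_probe() ordering. */
static void choose_event_delivery(void)
{
	if (hvm_callback_vector_supported) {
		xen_have_vector_callback = 1;
		printf("event delivery: HVM callback vector\n");
		return;
	}

	/*
	 * Pre-4.0 Xen: no callback vector, so the platform PCI driver binds
	 * the device's INTx line instead.  Those events are routed to vcpu 0
	 * only, hence the xen_support_evtchn_rebind() check in the patch.
	 */
	if (platform_pci_present)
		printf("event delivery: platform-pci INTx (vcpu 0 only)\n");
	else
		printf("no event delivery mechanism available\n");
}

int main(void)
{
	hvm_callback_vector_supported = false;	/* e.g. an HVM guest on Xen 3.4 */
	choose_event_delivery();
	return 0;
}

In the actual patch the same flag also gates the HVM timer, SMP and pirq
setup paths, which is why the hunks below check xen_have_vector_callback
there as well.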
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Ross Lagerwall <ross.lagerwall@citrix.com>
Cc: xen-devel@lists.xenproject.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: Anthony Liguori <aliguori@amazon.com>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/xen/events.h
arch/x86/pci/xen.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
drivers/xen/events/events_base.c
drivers/xen/platform-pci.c
include/xen/xen.h

diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 608a79d5a4669ebf3a9060f895d0f5f03ab3286c..e6911caf5bbf16ddc46430c44fcd4186258b713c 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 /* No need for a barrier -- XCHG is a barrier on x86. */
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
+extern int xen_have_vector_callback;
+
+/*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+static inline bool xen_support_evtchn_rebind(void)
+{
+       return (!xen_hvm_domain() || xen_have_vector_callback);
+}
+
 #endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 292ab0364a89af9aa6bc93a2ad79a88d00fbad9d..c4b3646bd04c835692b7f28a531310087fbf99d1 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -447,7 +447,7 @@ void __init xen_msi_init(void)
 
 int __init pci_xen_hvm_init(void)
 {
-       if (!xen_feature(XENFEAT_hvm_pirqs))
+       if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
                return 0;
 
 #ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 51ef952327257cb2a787b6743805e4cb677ecafc..6623867cc0d4746f90e1f91100ce379d4efa2311 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -137,6 +137,8 @@ struct shared_info xen_dummy_shared_info;
 void *xen_initial_gdt;
 
 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+__read_mostly int xen_have_vector_callback;
+EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
 static int xen_cpu_up_prepare(unsigned int cpu);
 static int xen_cpu_up_online(unsigned int cpu);
@@ -1508,7 +1510,10 @@ static void __init xen_pvh_early_guest_init(void)
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                return;
 
-       BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+       if (!xen_feature(XENFEAT_hvm_callback_vector))
+               return;
+
+       xen_have_vector_callback = 1;
 
        xen_pvh_early_cpu_init(0, false);
        xen_pvh_set_cr_flags(0);
@@ -1847,7 +1852,9 @@ static int xen_cpu_up_prepare(unsigned int cpu)
                xen_vcpu_setup(cpu);
        }
 
-       if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+       if (xen_pv_domain() ||
+           (xen_have_vector_callback &&
+            xen_feature(XENFEAT_hvm_safe_pvclock)))
                xen_setup_timer(cpu);
 
        rc = xen_smp_intr_init(cpu);
@@ -1863,7 +1870,9 @@ static int xen_cpu_dead(unsigned int cpu)
 {
        xen_smp_intr_free(cpu);
 
-       if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+       if (xen_pv_domain() ||
+           (xen_have_vector_callback &&
+            xen_feature(XENFEAT_hvm_safe_pvclock)))
                xen_teardown_timer(cpu);
 
        return 0;
@@ -1902,8 +1911,8 @@ static void __init xen_hvm_guest_init(void)
 
        xen_panic_handler_init();
 
-       BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
-
+       if (xen_feature(XENFEAT_hvm_callback_vector))
+               xen_have_vector_callback = 1;
        xen_hvm_smp_init();
        WARN_ON(xen_cpuhp_setup());
        xen_unplug_emulated_devices();
@@ -1941,7 +1950,7 @@ bool xen_hvm_need_lapic(void)
                return false;
        if (!xen_hvm_domain())
                return false;
-       if (xen_feature(XENFEAT_hvm_pirqs))
+       if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
                return false;
        return true;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 311acad7dad2abdc8501c70866674ed4f5858495..137afbbd05903baa700d9cce5d9707fac07f0062 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -765,6 +765,8 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 
 void __init xen_hvm_smp_init(void)
 {
+       if (!xen_have_vector_callback)
+               return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_die = xen_cpu_die;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 1e69956d7852bbfc8816acfb3bdd3d701ed99d59..4535627cf532f6d4bfee053c5a95234070b92120 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -432,6 +432,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
 
 void __init xen_hvm_init_time_ops(void)
 {
+       /* vector callback is needed otherwise we cannot receive interrupts
+        * on cpu > 0 and at this point we don't know how many cpus are
+        * available */
+       if (!xen_have_vector_callback)
+               return;
        if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
                printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
                                "disable pv timer\n");
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index fd8e872d29434633eb831e5077ff4d5d1dbd342c..86199f31bc57c0139967fd413583c3b8b6b8af9a 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1312,6 +1312,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        if (!VALID_EVTCHN(evtchn))
                return -1;
 
+       if (!xen_support_evtchn_rebind())
+               return -1;
+
        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
@@ -1645,15 +1648,20 @@ void xen_callback_vector(void)
 {
        int rc;
        uint64_t callback_via;
-
-       callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
-       rc = xen_set_callback_via(callback_via);
-       BUG_ON(rc);
-       pr_info("Xen HVM callback vector for event delivery is enabled\n");
-       /* in the restore case the vector has already been allocated */
-       if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
-               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
-                               xen_hvm_callback_vector);
+       if (xen_have_vector_callback) {
+               callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+               rc = xen_set_callback_via(callback_via);
+               if (rc) {
+                       pr_err("Request for Xen HVM callback vector failed\n");
+                       xen_have_vector_callback = 0;
+                       return;
+               }
+               pr_info("Xen HVM callback vector for event delivery is enabled\n");
+               /* in the restore case the vector has already been allocated */
+               if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+                       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+                                       xen_hvm_callback_vector);
+       }
 }
 #else
 void xen_callback_vector(void) {}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2a165cc8a43cd6768529ffe48126c172e5b9f7df..1c4deac9b0f8529a4da44f792221d314ed12efe7 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -67,7 +67,7 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
        pin = pdev->pin;
 
        /* We don't know the GSI. Specify the PCI INTx line instead. */
-       return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+       return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
                ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
                ((uint64_t)pdev->bus->number << 16) |
                ((uint64_t)(pdev->devfn & 0xff) << 8) |
@@ -90,7 +90,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
 static int platform_pci_resume(struct pci_dev *pdev)
 {
        int err;
-       if (!xen_pv_domain())
+       if (xen_have_vector_callback)
                return 0;
        err = xen_set_callback_via(callback_via);
        if (err) {
@@ -138,14 +138,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;
 
-       /* 
-        * Xen HVM guests always use the vector callback mechanism.
-        * L1 Dom0 in a nested Xen environment is a PV guest inside in an
-        * HVM environment. It needs the platform-pci driver to get
-        * notifications from L0 Xen, but it cannot use the vector callback
-        * as it is not exported by L1 Xen.
-        */
-       if (xen_pv_domain()) {
+       if (!xen_have_vector_callback) {
                ret = xen_allocate_irq(pdev);
                if (ret) {
                        dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
diff --git a/include/xen/xen.h b/include/xen/xen.h
index f0f0252cff9aab2442e8df64430ec118a2f391aa..0c0e3ef4c45dcfe7767af8c7f9d5ae2309e4efdc 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -38,7 +38,8 @@ extern enum xen_domain_type xen_domain_type;
  */
 #include <xen/features.h>
 #define xen_pvh_domain() (xen_pv_domain() && \
-                         xen_feature(XENFEAT_auto_translated_physmap))
+                         xen_feature(XENFEAT_auto_translated_physmap) && \
+                         xen_have_vector_callback)
 #else
 #define xen_pvh_domain()       (0)
 #endif