[Xen-devel] [PATCH v2 1/2] xen/x86: Move irq allocation from Xen smp_op.cpu_up()

Commit ce0d3c0a6fb1 ("genirq: Revert sparse irq locking around
__cpu_up() and move it to x86 for now") reverted irq locking
introduced by commit a89941816726 ("hotplug: Prevent alloc/free
of irq descriptors during cpu up/down") because Xen allocates
irqs in both of its cpu_up ops.

We can move those allocations into CPU notifiers so that the
original patch can be reinstated.
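
For reference, a stripped-down sketch of the notifier shape this moves to
(illustration only; the real handler in the hunks below also covers vcpu
setup, timer setup and the spinlock kicker):

  static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
                            void *hcpu)
  {
          int cpu = (long)hcpu;

          switch (action) {
          case CPU_UP_PREPARE:
                  /* irq descriptors are allocated here, before __cpu_up() */
                  if (xen_smp_intr_init(cpu))
                          return NOTIFY_BAD;
                  break;
          case CPU_UP_CANCELED:
                  /* bringup failed: release what CPU_UP_PREPARE allocated */
                  xen_smp_intr_free(cpu);
                  break;
          }
          return NOTIFY_OK;
  }

  static struct notifier_block xen_cpu_notifier = {
          .notifier_call = xen_cpu_notify,
  };

  /* registered once from xen_start_kernel() / xen_hvm_guest_init() */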

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---

v2: Add ifdef CONFIG_SMP to smp.h

 arch/x86/xen/enlighten.c | 61 +++++++++++++++++++++++++++++++++++++-----------
 arch/x86/xen/smp.c       | 45 ++---------------------------------
 arch/x86/xen/smp.h       | 13 +++++++++++
 3 files changed, 63 insertions(+), 56 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 8ffb089..c7f6b1f9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -140,6 +140,8 @@ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+static struct notifier_block xen_cpu_notifier;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -1627,6 +1629,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_initial_gdt = &per_cpu(gdt_page, 0);
 
        xen_smp_init();
+       register_cpu_notifier(&xen_cpu_notifier);
 
 #ifdef CONFIG_ACPI_NUMA
        /*
@@ -1820,21 +1823,53 @@ static void __init init_hvm_pv_info(void)
        xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
-                             void *hcpu)
+static int xen_cpu_notify(struct notifier_block *self, unsigned long action,
+                        void *hcpu)
 {
        int cpu = (long)hcpu;
+       int rc;
+
        switch (action) {
        case CPU_UP_PREPARE:
-               if (cpu_acpi_id(cpu) != U32_MAX)
-                       per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
-               else
-                       per_cpu(xen_vcpu_id, cpu) = cpu;
-               xen_vcpu_setup(cpu);
-               if (xen_have_vector_callback) {
-                       if (xen_feature(XENFEAT_hvm_safe_pvclock))
-                               xen_setup_timer(cpu);
+               if (xen_hvm_domain()) {
+                       /*
+                        * This can happen if CPU was offlined earlier and
+                        * offlining timed out in common_cpu_die().
+                        */
+                       if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+                               xen_smp_intr_free(cpu);
+                               xen_uninit_lock_cpu(cpu);
+                       }
+
+                       if (cpu_acpi_id(cpu) != U32_MAX)
+                               per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
+                       else
+                               per_cpu(xen_vcpu_id, cpu) = cpu;
+                       xen_vcpu_setup(cpu);
                }
+
+               if (xen_pv_domain() ||
+                   (xen_have_vector_callback &&
+                    xen_feature(XENFEAT_hvm_safe_pvclock)))
+                       xen_setup_timer(cpu);
+
+               rc = xen_smp_intr_init(cpu);
+               if (rc) {
+                       WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+                            cpu, rc);
+                       return NOTIFY_BAD;
+               }
+
+               break;
+       case CPU_ONLINE:
+               xen_init_lock_cpu(cpu);
+               break;
+       case CPU_UP_CANCELED:
+               xen_smp_intr_free(cpu);
+               if (xen_pv_domain() ||
+                   (xen_have_vector_callback &&
+                    xen_feature(XENFEAT_hvm_safe_pvclock)))
+                       xen_teardown_timer(cpu);
                break;
        default:
                break;
@@ -1842,8 +1877,8 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
        return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier = {
-       .notifier_call  = xen_hvm_cpu_notify,
+static struct notifier_block xen_cpu_notifier = {
+       .notifier_call  = xen_cpu_notify,
 };
 
 #ifdef CONFIG_KEXEC_CORE
@@ -1875,7 +1910,7 @@ static void __init xen_hvm_guest_init(void)
        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
        xen_hvm_smp_init();
-       register_cpu_notifier(&xen_hvm_cpu_notifier);
+       register_cpu_notifier(&xen_cpu_notifier);
        xen_unplug_emulated_devices();
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 0b4d04c..137afbb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -115,7 +115,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-static void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free(unsigned int cpu)
 {
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
@@ -159,7 +159,7 @@ static void xen_smp_intr_free(unsigned int cpu)
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
 };
-static int xen_smp_intr_init(unsigned int cpu)
+int xen_smp_intr_init(unsigned int cpu)
 {
        int rc;
        char *resched_name, *callfunc_name, *debug_name, *pmu_name;
@@ -475,8 +475,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
        common_cpu_up(cpu, idle);
 
        xen_setup_runstate_info(cpu);
-       xen_setup_timer(cpu);
-       xen_init_lock_cpu(cpu);
 
        /*
         * PV VCPUs are always successfully taken down (see 'while' loop
@@ -495,10 +493,6 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 
        xen_pmu_init(cpu);
 
-       rc = xen_smp_intr_init(cpu);
-       if (rc)
-               return rc;
-
        rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
        BUG_ON(rc);
 
@@ -769,47 +763,12 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
        xen_init_lock_cpu(0);
 }
 
-static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-       int rc;
-
-       /*
-        * This can happen if CPU was offlined earlier and
-        * offlining timed out in common_cpu_die().
-        */
-       if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
-               xen_smp_intr_free(cpu);
-               xen_uninit_lock_cpu(cpu);
-       }
-
-       /*
-        * xen_smp_intr_init() needs to run before native_cpu_up()
-        * so that IPI vectors are set up on the booting CPU before
-        * it is marked online in native_cpu_up().
-       */
-       rc = xen_smp_intr_init(cpu);
-       WARN_ON(rc);
-       if (!rc)
-               rc =  native_cpu_up(cpu, tidle);
-
-       /*
-        * We must initialize the slowpath CPU kicker _after_ the native
-        * path has executed. If we initialized it before none of the
-        * unlocker IPI kicks would reach the booting CPU as the booting
-        * CPU had not set itself 'online' in cpu_online_mask. That mask
-        * is checked when IPIs are sent (on HVM at least).
-        */
-       xen_init_lock_cpu(cpu);
-       return rc;
-}
-
 void __init xen_hvm_smp_init(void)
 {
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-       smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 963d62a..c5c16dc 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -1,5 +1,6 @@
 #ifndef _XEN_SMP_H
 
+#ifdef CONFIG_SMP
 extern void xen_send_IPI_mask(const struct cpumask *mask,
                              int vector);
 extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
@@ -8,6 +9,18 @@ extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
 
+extern int xen_smp_intr_init(unsigned int cpu);
+extern void xen_smp_intr_free(unsigned int cpu);
+
+#else /* CONFIG_SMP */
+
+static inline int xen_smp_intr_init(unsigned int cpu)
+{
+       return 0;
+}
+static inline void xen_smp_intr_free(unsigned int cpu) {}
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_XEN_PVH
 extern void xen_pvh_early_cpu_init(int cpu, bool entry);
 #else
-- 
2.7.4

