
[Xen-devel] [RFC PATCH KERNEL 2/4] x86/xen: split smp.c for PV and PVHVM guests



Split smp.c more or less mechanically into three files: smp_common.c with
code shared by PV and PVHVM guests, smp.c with PV-only code, and smp_hvm.c
with PVHVM-only code. Add the XEN_PV_SMP and XEN_PVHVM_SMP config options
to select which of them get built.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/x86/xen/Kconfig      |   8 ++
 arch/x86/xen/Makefile     |   5 +-
 arch/x86/xen/enlighten.c  |   8 ++
 arch/x86/xen/smp.c        | 295 +++-------------------------------------------
 arch/x86/xen/smp.h        |  23 ++++
 arch/x86/xen/smp_common.c | 246 ++++++++++++++++++++++++++++++++++++++
 arch/x86/xen/smp_hvm.c    |  54 +++++++++
 7 files changed, 359 insertions(+), 280 deletions(-)
 create mode 100644 arch/x86/xen/smp_common.c
 create mode 100644 arch/x86/xen/smp_hvm.c
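
Note (not part of the patch itself): after the split, CPU-up paths are
expected to pair the common and the PV-only helpers. A minimal sketch of
the intended usage follows; example_cpu_up_prepare() is a hypothetical
name, the xen_* calls are the ones touched by this patch.

	/*
	 * Illustrative only. PV guests need both the common IPIs and the
	 * PV-only ones (irq_work, PMU); PVHVM guests only need the common
	 * set, as irq_work goes through the native IPI path there.
	 */
	static int example_cpu_up_prepare(unsigned int cpu)
	{
		int rc;

		/* resched/callfunc/callfuncsingle/debug -- smp_common.c */
		rc = xen_smp_intr_init(cpu);
		if (rc)
			return rc;

		if (xen_pv_domain()) {
			/* irq_work + PMU IPIs -- smp.c */
			rc = xen_smp_intr_init_pv(cpu);
			if (rc)
				xen_smp_intr_free(cpu);
		}

		return rc;
	}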

diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 8298378..e25c93e 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -21,6 +21,10 @@ config XEN_PV
        help
          Support running as a Xen PV guest.
 
+config XEN_PV_SMP
+       def_bool y
+       depends on XEN_PV && SMP
+
 config XEN_DOM0
        bool "Xen PV Dom0 support"
        default y
@@ -37,6 +41,10 @@ config XEN_PVHVM
        help
          Support running as a Xen PVHVM guest.
 
+config XEN_PVHVM_SMP
+       def_bool y
+       depends on XEN_PVHVM && SMP
+
 config XEN_512GB
        bool "Limit Xen pv-domain memory to 512GB"
        depends on XEN_PV && X86_64
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index e60fc93..aa6cd5e 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -20,7 +20,10 @@ obj-$(CONFIG_XEN_PVHVM)              += enlighten_hvm.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
-obj-$(CONFIG_SMP)              += smp.o
+obj-$(CONFIG_SMP)              += smp_common.o
+obj-$(CONFIG_XEN_PV_SMP)       += smp.o
+obj-$(CONFIG_XEN_PVHVM_SMP)    += smp_hvm.o
+
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
 obj-$(CONFIG_XEN_DOM0)         += vga.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 086c339..583221c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1603,12 +1603,20 @@ static int xen_cpu_up_prepare_pv(unsigned int cpu)
                     cpu, rc);
                return rc;
        }
+
+       rc = xen_smp_intr_init_pv(cpu);
+       if (rc) {
+               WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
+                    cpu, rc);
+               return rc;
+       }
        return 0;
 }
 
 static int xen_cpu_dead_pv(unsigned int cpu)
 {
        xen_smp_intr_free(cpu);
+       xen_smp_intr_free_pv(cpu);
 
        if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_teardown_timer(cpu);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index bdb0d9c..0cb6d99 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -43,32 +43,11 @@
 
 cpumask_var_t xen_cpu_initialized_map;
 
-struct xen_common_irq {
-       int irq;
-       char *name;
-};
-static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
 static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
 
-static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
 
-/*
- * Reschedule call back.
- */
-static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
-{
-       inc_irq_stat(irq_resched_count);
-       scheduler_ipi();
-
-       return IRQ_HANDLED;
-}
-
 static void cpu_bringup(void)
 {
        int cpu;
@@ -121,36 +100,8 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 }
 
-void xen_smp_intr_free(unsigned int cpu)
+void xen_smp_intr_free_pv(unsigned int cpu)
 {
-       if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
-               unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
-               per_cpu(xen_resched_irq, cpu).irq = -1;
-               kfree(per_cpu(xen_resched_irq, cpu).name);
-               per_cpu(xen_resched_irq, cpu).name = NULL;
-       }
-       if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
-               unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
-               per_cpu(xen_callfunc_irq, cpu).irq = -1;
-               kfree(per_cpu(xen_callfunc_irq, cpu).name);
-               per_cpu(xen_callfunc_irq, cpu).name = NULL;
-       }
-       if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
-               unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
-               per_cpu(xen_debug_irq, cpu).irq = -1;
-               kfree(per_cpu(xen_debug_irq, cpu).name);
-               per_cpu(xen_debug_irq, cpu).name = NULL;
-       }
-       if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
-               unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
-                                      NULL);
-               per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
-               kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
-               per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
-       }
-       if (xen_hvm_domain())
-               return;
-
        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
@@ -164,63 +115,12 @@ void xen_smp_intr_free(unsigned int cpu)
                kfree(per_cpu(xen_pmu_irq, cpu).name);
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
-};
-int xen_smp_intr_init(unsigned int cpu)
+}
+
+int xen_smp_intr_init_pv(unsigned int cpu)
 {
        int rc;
-       char *resched_name, *callfunc_name, *debug_name, *pmu_name;
-
-       resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
-       rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
-                                   cpu,
-                                   xen_reschedule_interrupt,
-                                   IRQF_PERCPU|IRQF_NOBALANCING,
-                                   resched_name,
-                                   NULL);
-       if (rc < 0)
-               goto fail;
-       per_cpu(xen_resched_irq, cpu).irq = rc;
-       per_cpu(xen_resched_irq, cpu).name = resched_name;
-
-       callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
-       rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
-                                   cpu,
-                                   xen_call_function_interrupt,
-                                   IRQF_PERCPU|IRQF_NOBALANCING,
-                                   callfunc_name,
-                                   NULL);
-       if (rc < 0)
-               goto fail;
-       per_cpu(xen_callfunc_irq, cpu).irq = rc;
-       per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
-
-       debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
-       rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
-                                    IRQF_PERCPU | IRQF_NOBALANCING,
-                                    debug_name, NULL);
-       if (rc < 0)
-               goto fail;
-       per_cpu(xen_debug_irq, cpu).irq = rc;
-       per_cpu(xen_debug_irq, cpu).name = debug_name;
-
-       callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
-       rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
-                                   cpu,
-                                   xen_call_function_single_interrupt,
-                                   IRQF_PERCPU|IRQF_NOBALANCING,
-                                   callfunc_name,
-                                   NULL);
-       if (rc < 0)
-               goto fail;
-       per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
-       per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
-
-       /*
-        * The IRQ worker on PVHVM goes through the native path and uses the
-        * IPI mechanism.
-        */
-       if (xen_hvm_domain())
-               return 0;
+       char *callfunc_name, *pmu_name;
 
        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
@@ -249,11 +149,10 @@ int xen_smp_intr_init(unsigned int cpu)
        return 0;
 
  fail:
-       xen_smp_intr_free(cpu);
+       xen_smp_intr_free_pv(cpu);
        return rc;
 }
 
-#ifdef CONFIG_XEN_PV
 static void __init xen_fill_possible_map(void)
 {
        int i, rc;
@@ -305,7 +204,6 @@ static void __init xen_filter_cpu_maps(void)
 #endif
 
 }
-#endif
 
 static void __init xen_smp_prepare_boot_cpu(void)
 {
@@ -313,7 +211,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
        native_smp_prepare_boot_cpu();
 
        if (xen_pv_domain()) {
-#ifdef CONFIG_XEN_PV
                if (!xen_feature(XENFEAT_writable_page_tables))
                        /* We've switched to the "real" per-cpu gdt, so make
                         * sure the old memory can be recycled. */
@@ -330,16 +227,9 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
-#endif
        }
 
        /*
-        * Setup vcpu_info for boot CPU.
-        */
-       if (xen_hvm_domain())
-               xen_vcpu_setup(0);
-
-       /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the smp bootup up code is activated. Hence we need to set this up
         * the core kernel is being patched. Otherwise we will have only
@@ -348,7 +238,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
        xen_init_spinlocks();
 }
 
-#ifdef CONFIG_XEN_PV
 static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned cpu;
@@ -380,6 +269,9 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        if (xen_smp_intr_init(0))
                BUG();
 
+       if (xen_smp_intr_init_pv(0))
+               BUG();
+
        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");
 
@@ -489,7 +381,7 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 
        /*
         * PV VCPUs are always successfully taken down (see 'while' loop
-        * in xen_cpu_die()), so -EBUSY is an error.
+        * in xen_cpu_die_pv()), so -EBUSY is an error.
         */
        rc = cpu_check_up_prepare(cpu);
        if (rc)
@@ -552,7 +444,7 @@ static int xen_cpu_disable(void)
        return -ENOSYS;
 }
 
-static void xen_cpu_die(unsigned int cpu)
+static void xen_cpu_die_pv(unsigned int cpu)
 {
        BUG();
 }
@@ -581,159 +473,24 @@ static void xen_stop_other_cpus(int wait)
 {
        smp_call_function(stop_self, NULL, wait);
 }
-#endif /* CONFIG_XEN_PV */
 
-static void xen_cpu_die(unsigned int cpu)
+static void xen_cpu_die_pv(unsigned int cpu)
 {
-       while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up,
-                                                    xen_vcpu_nr(cpu), NULL)) {
+       while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
+                                 xen_vcpu_nr(cpu), NULL)) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }
 
        if (common_cpu_die(cpu) == 0) {
                xen_smp_intr_free(cpu);
+               xen_smp_intr_free_pv(cpu);
                xen_uninit_lock_cpu(cpu);
                xen_teardown_timer(cpu);
                xen_pmu_finish(cpu);
        }
 }
 
-static void xen_smp_send_reschedule(int cpu)
-{
-       xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
-}
-
-static void __xen_send_IPI_mask(const struct cpumask *mask,
-                             int vector)
-{
-       unsigned cpu;
-
-       for_each_cpu_and(cpu, mask, cpu_online_mask)
-               xen_send_IPI_one(cpu, vector);
-}
-
-static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
-{
-       int cpu;
-
-       __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-       /* Make sure other vcpus get a chance to run if they need to. */
-       for_each_cpu(cpu, mask) {
-               if (xen_vcpu_stolen(cpu)) {
-                       HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-                       break;
-               }
-       }
-}
-
-static void xen_smp_send_call_function_single_ipi(int cpu)
-{
-       __xen_send_IPI_mask(cpumask_of(cpu),
-                         XEN_CALL_FUNCTION_SINGLE_VECTOR);
-}
-
-static inline int xen_map_vector(int vector)
-{
-       int xen_vector;
-
-       switch (vector) {
-       case RESCHEDULE_VECTOR:
-               xen_vector = XEN_RESCHEDULE_VECTOR;
-               break;
-       case CALL_FUNCTION_VECTOR:
-               xen_vector = XEN_CALL_FUNCTION_VECTOR;
-               break;
-       case CALL_FUNCTION_SINGLE_VECTOR:
-               xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
-               break;
-       case IRQ_WORK_VECTOR:
-               xen_vector = XEN_IRQ_WORK_VECTOR;
-               break;
-#ifdef CONFIG_X86_64
-       case NMI_VECTOR:
-       case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
-               xen_vector = XEN_NMI_VECTOR;
-               break;
-#endif
-       default:
-               xen_vector = -1;
-               printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
-                       vector);
-       }
-
-       return xen_vector;
-}
-
-void xen_send_IPI_mask(const struct cpumask *mask,
-                             int vector)
-{
-       int xen_vector = xen_map_vector(vector);
-
-       if (xen_vector >= 0)
-               __xen_send_IPI_mask(mask, xen_vector);
-}
-
-void xen_send_IPI_all(int vector)
-{
-       int xen_vector = xen_map_vector(vector);
-
-       if (xen_vector >= 0)
-               __xen_send_IPI_mask(cpu_online_mask, xen_vector);
-}
-
-void xen_send_IPI_self(int vector)
-{
-       int xen_vector = xen_map_vector(vector);
-
-       if (xen_vector >= 0)
-               xen_send_IPI_one(smp_processor_id(), xen_vector);
-}
-
-void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
-                               int vector)
-{
-       unsigned cpu;
-       unsigned int this_cpu = smp_processor_id();
-       int xen_vector = xen_map_vector(vector);
-
-       if (!(num_online_cpus() > 1) || (xen_vector < 0))
-               return;
-
-       for_each_cpu_and(cpu, mask, cpu_online_mask) {
-               if (this_cpu == cpu)
-                       continue;
-
-               xen_send_IPI_one(cpu, xen_vector);
-       }
-}
-
-void xen_send_IPI_allbutself(int vector)
-{
-       xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
-{
-       irq_enter();
-       generic_smp_call_function_interrupt();
-       inc_irq_stat(irq_call_count);
-       irq_exit();
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
-{
-       irq_enter();
-       generic_smp_call_function_single_interrupt();
-       inc_irq_stat(irq_call_count);
-       irq_exit();
-
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
 {
        irq_enter();
@@ -744,14 +501,13 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_XEN_PV
 static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,
 
        .cpu_up = xen_cpu_up,
-       .cpu_die = xen_cpu_die,
+       .cpu_die = xen_cpu_die_pv,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,
 
@@ -767,22 +523,3 @@ void __init xen_smp_init(void)
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
 }
-#endif
-
-static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
-{
-       native_smp_prepare_cpus(max_cpus);
-       WARN_ON(xen_smp_intr_init(0));
-
-       xen_init_lock_cpu(0);
-}
-
-void __init xen_hvm_smp_init(void)
-{
-       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
-       smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-       smp_ops.cpu_die = xen_cpu_die;
-       smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
-       smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-       smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
-}
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index c5c16dc..94ed5cc 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -8,9 +8,22 @@ extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
 extern void xen_send_IPI_allbutself(int vector);
 extern void xen_send_IPI_all(int vector);
 extern void xen_send_IPI_self(int vector);
+extern void xen_send_IPI_mask(const struct cpumask *mask, int vector);
 
 extern int xen_smp_intr_init(unsigned int cpu);
 extern void xen_smp_intr_free(unsigned int cpu);
+#ifdef CONFIG_XEN_PV
+extern int xen_smp_intr_init_pv(unsigned int cpu);
+extern void xen_smp_intr_free_pv(unsigned int cpu);
+#endif
+extern void xen_smp_send_reschedule(int cpu);
+extern void xen_smp_send_call_function_ipi(const struct cpumask *mask);
+extern void xen_smp_send_call_function_single_ipi(int cpu);
+
+struct xen_common_irq {
+       int irq;
+       char *name;
+};
 
 #else /* CONFIG_SMP */
 
@@ -18,6 +31,16 @@ static inline int xen_smp_intr_init(unsigned int cpu)
 {
        return 0;
 }
+
+#ifdef CONFIG_XEN_PV
+static inline int xen_smp_intr_init_pv(unsigned int cpu)
+{
+       return 0;
+}
+
+static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
+#endif
+
 static inline void xen_smp_intr_free(unsigned int cpu) {}
 #endif /* CONFIG_SMP */
 
diff --git a/arch/x86/xen/smp_common.c b/arch/x86/xen/smp_common.c
new file mode 100644
index 0000000..7a39cad
--- /dev/null
+++ b/arch/x86/xen/smp_common.c
@@ -0,0 +1,246 @@
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "pmu.h"
+#include "smp.h"
+
+static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
+
+/*
+ * Reschedule call back.
+ */
+static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+{
+       inc_irq_stat(irq_resched_count);
+       scheduler_ipi();
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+{
+       irq_enter();
+       generic_smp_call_function_interrupt();
+       inc_irq_stat(irq_call_count);
+       irq_exit();
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
+{
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+       inc_irq_stat(irq_call_count);
+       irq_exit();
+
+       return IRQ_HANDLED;
+}
+
+void xen_smp_intr_free(unsigned int cpu)
+{
+       if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
+               unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
+               per_cpu(xen_resched_irq, cpu).irq = -1;
+               kfree(per_cpu(xen_resched_irq, cpu).name);
+               per_cpu(xen_resched_irq, cpu).name = NULL;
+       }
+       if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
+               unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
+               per_cpu(xen_callfunc_irq, cpu).irq = -1;
+               kfree(per_cpu(xen_callfunc_irq, cpu).name);
+               per_cpu(xen_callfunc_irq, cpu).name = NULL;
+       }
+       if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
+               unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
+               per_cpu(xen_debug_irq, cpu).irq = -1;
+               kfree(per_cpu(xen_debug_irq, cpu).name);
+               per_cpu(xen_debug_irq, cpu).name = NULL;
+       }
+       if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
+               unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
+                                      NULL);
+               per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
+               kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
+               per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
+       }
+}
+
+int xen_smp_intr_init(unsigned int cpu)
+{
+       int rc;
+       char *resched_name, *callfunc_name, *debug_name;
+
+       resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+       rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+                                   cpu,
+                                   xen_reschedule_interrupt,
+                                   IRQF_PERCPU|IRQF_NOBALANCING,
+                                   resched_name,
+                                   NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(xen_resched_irq, cpu).irq = rc;
+       per_cpu(xen_resched_irq, cpu).name = resched_name;
+
+       callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+       rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+                                   cpu,
+                                   xen_call_function_interrupt,
+                                   IRQF_PERCPU|IRQF_NOBALANCING,
+                                   callfunc_name,
+                                   NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(xen_callfunc_irq, cpu).irq = rc;
+       per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
+
+       debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+       rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
+                                    IRQF_PERCPU | IRQF_NOBALANCING,
+                                    debug_name, NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(xen_debug_irq, cpu).irq = rc;
+       per_cpu(xen_debug_irq, cpu).name = debug_name;
+
+       callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+       rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+                                   cpu,
+                                   xen_call_function_single_interrupt,
+                                   IRQF_PERCPU|IRQF_NOBALANCING,
+                                   callfunc_name,
+                                   NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
+       per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
+
+       return 0;
+
+ fail:
+       xen_smp_intr_free(cpu);
+       return rc;
+}
+
+void xen_smp_send_reschedule(int cpu)
+{
+       xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
+}
+
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+                               int vector)
+{
+       unsigned cpu;
+
+       for_each_cpu_and(cpu, mask, cpu_online_mask)
+               xen_send_IPI_one(cpu, vector);
+}
+
+void xen_smp_send_call_function_ipi(const struct cpumask *mask)
+{
+       int cpu;
+
+       __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+       /* Make sure other vcpus get a chance to run if they need to. */
+       for_each_cpu(cpu, mask) {
+               if (xen_vcpu_stolen(cpu)) {
+                       HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+                       break;
+               }
+       }
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+       __xen_send_IPI_mask(cpumask_of(cpu),
+                           XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
+static inline int xen_map_vector(int vector)
+{
+       int xen_vector;
+
+       switch (vector) {
+       case RESCHEDULE_VECTOR:
+               xen_vector = XEN_RESCHEDULE_VECTOR;
+               break;
+       case CALL_FUNCTION_VECTOR:
+               xen_vector = XEN_CALL_FUNCTION_VECTOR;
+               break;
+       case CALL_FUNCTION_SINGLE_VECTOR:
+               xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+               break;
+       case IRQ_WORK_VECTOR:
+               xen_vector = XEN_IRQ_WORK_VECTOR;
+               break;
+#ifdef CONFIG_X86_64
+       case NMI_VECTOR:
+       case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
+               xen_vector = XEN_NMI_VECTOR;
+               break;
+#endif
+       default:
+               xen_vector = -1;
+               printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+                       vector);
+       }
+
+       return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               __xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               __xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+       int xen_vector = xen_map_vector(vector);
+
+       if (xen_vector >= 0)
+               xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+                               int vector)
+{
+       unsigned cpu;
+       unsigned int this_cpu = smp_processor_id();
+       int xen_vector = xen_map_vector(vector);
+
+       if (!(num_online_cpus() > 1) || (xen_vector < 0))
+               return;
+
+       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+               if (this_cpu == cpu)
+                       continue;
+
+               xen_send_IPI_one(cpu, xen_vector);
+       }
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+       xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
+}
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
new file mode 100644
index 0000000..ce4ff59
--- /dev/null
+++ b/arch/x86/xen/smp_hvm.c
@@ -0,0 +1,54 @@
+#include <asm/smp.h>
+
+#include "xen-ops.h"
+#include "smp.h"
+
+static void __init xen_smp_prepare_boot_cpu_hvm(void)
+{
+       BUG_ON(smp_processor_id() != 0);
+       native_smp_prepare_boot_cpu();
+
+       xen_vcpu_setup(0);
+
+       /*
+        * The alternative logic (which patches the unlock/lock) runs before
+        * the smp bootup code is activated. Hence we need to set this up
+        * before the core kernel is being patched. Otherwise we will have
+        * only modules patched but not core code.
+        */
+       xen_init_spinlocks();
+}
+
+static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+       native_smp_prepare_cpus(max_cpus);
+       WARN_ON(xen_smp_intr_init(0));
+
+       xen_init_lock_cpu(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void xen_cpu_die_hvm(unsigned int cpu)
+{
+       if (common_cpu_die(cpu) == 0) {
+               xen_smp_intr_free(cpu);
+               xen_uninit_lock_cpu(cpu);
+               xen_teardown_timer(cpu);
+       }
+}
+#else
+static void xen_cpu_die_hvm(unsigned int cpu)
+{
+       BUG();
+}
+#endif
+
+void __init xen_hvm_smp_init(void)
+{
+       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+       smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+       smp_ops.cpu_die = xen_cpu_die_hvm;
+       smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+       smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+       smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu_hvm;
+}
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
