
[Xen-devel] [PATCH v9 12/19] xen: add a hook to perform AP startup



AP startup on PVH follows the PV method rather than the bare metal one, so add a
start_all_aps hook to cpu_ops that allows the Xen PV code to diverge from the native path.
---
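For context (editorial note, not part of the patch): a minimal stand-alone sketch
of the hook pattern introduced here. cpu_ops carries a start_all_aps function
pointer that defaults to the native INIT/SIPI implementation and is swapped for
the Xen PV one early in boot when the kernel detects a PV(H) domain. The struct
is trimmed to the relevant member, and the printf bodies plus the xen_pv_domain
flag below are illustrative stand-ins, not the real kernel code.

/*
 * Illustration only: the function-pointer hook that lets PV(H) guests
 * diverge from the bare metal AP startup path.
 */
#include <stdio.h>

struct cpu_ops {
	int	(*start_all_aps)(void);
};

static int
native_start_all_aps(void)
{
	/* Bare metal: APs are woken with INIT/SIPI IPIs. */
	printf("native_start_all_aps: INIT/SIPI path\n");
	return (0);
}

static int
xen_pv_start_all_aps(void)
{
	/* PV(H): vCPUs are brought up with VCPUOP_initialise/VCPUOP_up. */
	printf("xen_pv_start_all_aps: hypercall path\n");
	return (0);
}

/* Default to the native implementation, as amd64/mp_machdep.c does. */
static struct cpu_ops cpu_ops = {
	.start_all_aps = native_start_all_aps,
};

int
main(void)
{
	int xen_pv_domain = 1;	/* stand-in for the real PV(H) detection */

	/* xen_hvm_init() overrides the hook when running as a PV guest. */
	if (xen_pv_domain)
		cpu_ops.start_all_aps = xen_pv_start_all_aps;

	/* cpu_mp_start() then dispatches through the hook. */
	return (cpu_ops.start_all_aps());
}

The same mechanism already exists for ipi_vectored, cpu_init and cpu_resume;
the patch only adds one more member so the Xen PV port can reuse it.
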
 sys/amd64/amd64/mp_machdep.c |   14 +++---
 sys/amd64/include/cpu.h      |    1 +
 sys/amd64/include/smp.h      |    1 +
 sys/x86/xen/hvm.c            |   12 +++++-
 sys/x86/xen/pv.c             |   85 ++++++++++++++++++++++++++++++++++++++++++
 sys/xen/pv.h                 |   32 ++++++++++++++++
 6 files changed, 137 insertions(+), 8 deletions(-)
 create mode 100644 sys/xen/pv.h

diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index 4ef4b3d..0738a37 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -90,7 +90,7 @@ extern  struct pcpu __pcpu[];
 
 /* AP uses this during bootstrap.  Do not staticize.  */
 char *bootSTK;
-static int bootAP;
+int bootAP;
 
 /* Free these after use */
 void *bootstacks[MAXCPU];
@@ -124,7 +124,8 @@ static u_long *ipi_hardclock_counts[MAXCPU];
 
 /* Default cpu_ops implementation. */
 struct cpu_ops cpu_ops = {
-       .ipi_vectored = lapic_ipi_vectored
+       .ipi_vectored = lapic_ipi_vectored,
+       .start_all_aps = native_start_all_aps,
 };
 
 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
@@ -138,7 +139,7 @@ extern int pmap_pcid_enabled;
 static volatile cpuset_t ipi_nmi_pending;
 
 /* used to hold the AP's until we are ready to release them */
-static struct mtx ap_boot_mtx;
+struct mtx ap_boot_mtx;
 
 /* Set to 1 once we're ready to let the APs out of the pen. */
 static volatile int aps_ready = 0;
@@ -165,7 +166,6 @@ static int cpu_cores;                       /* cores per package */
 
 static void    assign_cpu_ids(void);
 static void    set_interrupt_apic_ids(void);
-static int     start_all_aps(void);
 static int     start_ap(int apic_id);
 static void    release_aps(void *dummy);
 
@@ -569,7 +569,7 @@ cpu_mp_start(void)
        assign_cpu_ids();
 
        /* Start each Application Processor */
-       start_all_aps();
+       cpu_ops.start_all_aps();
 
        set_interrupt_apic_ids();
 }
@@ -908,8 +908,8 @@ assign_cpu_ids(void)
 /*
  * start each AP in our list
  */
-static int
-start_all_aps(void)
+int
+native_start_all_aps(void)
 {
        vm_offset_t va = boot_address + KERNBASE;
        u_int64_t *pt4, *pt3, *pt2;
diff --git a/sys/amd64/include/cpu.h b/sys/amd64/include/cpu.h
index 3d9ff531..ed9f1db 100644
--- a/sys/amd64/include/cpu.h
+++ b/sys/amd64/include/cpu.h
@@ -64,6 +64,7 @@ struct cpu_ops {
        void (*cpu_init)(void);
        void (*cpu_resume)(void);
        void (*ipi_vectored)(u_int, int);
+       int  (*start_all_aps)(void);
 };
 
 extern struct  cpu_ops cpu_ops;
diff --git a/sys/amd64/include/smp.h b/sys/amd64/include/smp.h
index d1b366b..15bc823 100644
--- a/sys/amd64/include/smp.h
+++ b/sys/amd64/include/smp.h
@@ -79,6 +79,7 @@ void  smp_masked_invlpg_range(cpuset_t mask, struct pmap *pmap,
            vm_offset_t startva, vm_offset_t endva);
 void   smp_invltlb(struct pmap *pmap);
 void   smp_masked_invltlb(cpuset_t mask, struct pmap *pmap);
+int    native_start_all_aps(void);
 
 #endif /* !LOCORE */
 #endif /* SMP */
diff --git a/sys/x86/xen/hvm.c b/sys/x86/xen/hvm.c
index fb1ed79..49caacf 100644
--- a/sys/x86/xen/hvm.c
+++ b/sys/x86/xen/hvm.c
@@ -53,6 +53,9 @@ __FBSDID("$FreeBSD$");
 #include <xen/hypervisor.h>
 #include <xen/hvm.h>
 #include <xen/xen_intr.h>
+#ifdef __amd64__
+#include <xen/pv.h>
+#endif
 
 #include <xen/interface/hvm/params.h>
 #include <xen/interface/vcpu.h>
@@ -119,7 +122,10 @@ enum xen_domain_type xen_domain_type = XEN_NATIVE;
 struct cpu_ops xen_hvm_cpu_ops = {
        .ipi_vectored   = lapic_ipi_vectored,
        .cpu_init       = xen_hvm_cpu_init,
-       .cpu_resume     = xen_hvm_cpu_resume
+       .cpu_resume     = xen_hvm_cpu_resume,
+#ifdef __amd64__
+       .start_all_aps = native_start_all_aps,
+#endif
 };
 
 static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
@@ -698,6 +704,10 @@ xen_hvm_init(enum xen_hvm_init_type init_type)
                setup_xen_features();
                cpu_ops = xen_hvm_cpu_ops;
                vm_guest = VM_GUEST_XEN;
+#ifdef __amd64__
+               if (xen_pv_domain())
+                       cpu_ops.start_all_aps = xen_pv_start_all_aps;
+#endif
                break;
        case XEN_HVM_INIT_RESUME:
                if (error != 0)
diff --git a/sys/x86/xen/pv.c b/sys/x86/xen/pv.c
index d11bc1a..22fd6a6 100644
--- a/sys/x86/xen/pv.c
+++ b/sys/x86/xen/pv.c
@@ -34,8 +34,11 @@ __FBSDID("$FreeBSD$");
 #include <sys/kernel.h>
 #include <sys/reboot.h>
 #include <sys/systm.h>
+#include <sys/malloc.h>
 #include <sys/lock.h>
 #include <sys/rwlock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
 
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
@@ -49,9 +52,13 @@ __FBSDID("$FreeBSD$");
 #include <machine/sysarch.h>
 #include <machine/clock.h>
 #include <machine/pc/bios.h>
+#include <machine/smp.h>
 
 #include <xen/xen-os.h>
 #include <xen/hypervisor.h>
+#include <xen/pv.h>
+
+#include <xen/interface/vcpu.h>
 
 /* Native initial function */
 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
@@ -65,6 +72,15 @@ static caddr_t xen_pv_parse_preload_data(u_int64_t);
 static void xen_pv_parse_memmap(caddr_t, vm_paddr_t *, int *);
 
 static void xen_pv_set_init_ops(void);
+/*---------------------------- Extern Declarations ---------------------------*/
+/* Variables used by amd64 mp_machdep to start APs */
+extern struct mtx ap_boot_mtx;
+extern void *bootstacks[];
+extern char *doublefault_stack;
+extern char *nmi_stack;
+extern void *dpcpu;
+extern int bootAP;
+extern char *bootSTK;
 
 /*-------------------------------- Global Data -------------------------------*/
 /* Xen init_ops implementation. */
@@ -168,6 +184,75 @@ hammer_time_xen(start_info_t *si, u_int64_t xenstack)
 }
 
 /*-------------------------------- PV specific -------------------------------*/
+
+static int
+start_xen_ap(int cpu)
+{
+       struct vcpu_guest_context *ctxt;
+       int ms, cpus = mp_naps;
+
+       ctxt = malloc(sizeof(*ctxt), M_TEMP, M_NOWAIT | M_ZERO);
+       if (ctxt == NULL)
+               panic("unable to allocate memory");
+
+       ctxt->flags = VGCF_IN_KERNEL;
+       ctxt->user_regs.rip = (unsigned long) init_secondary;
+       ctxt->user_regs.rsp = (unsigned long) bootSTK;
+
+       /* Set the AP to use the same page tables */
+       ctxt->ctrlreg[3] = KPML4phys;
+
+       if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
+               panic("unable to initialize AP#%d\n", cpu);
+
+       free(ctxt, M_TEMP);
+
+       /* Launch the vCPU */
+       if (HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
+               panic("unable to start AP#%d\n", cpu);
+
+       /* Wait up to 5 seconds for it to start. */
+       for (ms = 0; ms < 5000; ms++) {
+               if (mp_naps > cpus)
+                       return (1);     /* return SUCCESS */
+               DELAY(1000);
+       }
+
+       return (0);
+}
+
+int
+xen_pv_start_all_aps(void)
+{
+       int cpu;
+
+       mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
+
+       for (cpu = 1; cpu < mp_ncpus; cpu++) {
+
+               /* allocate and set up an idle stack data page */
+               bootstacks[cpu] = (void *)kmem_malloc(kernel_arena,
+                   KSTACK_PAGES * PAGE_SIZE, M_WAITOK | M_ZERO);
+               doublefault_stack = (char *)kmem_malloc(kernel_arena,
+                   PAGE_SIZE, M_WAITOK | M_ZERO);
+               nmi_stack = (char *)kmem_malloc(kernel_arena, PAGE_SIZE,
+                   M_WAITOK | M_ZERO);
+               dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
+                   M_WAITOK | M_ZERO);
+
+       bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
+               bootAP = cpu;
+
+               /* attempt to start the Application Processor */
+               if (!start_xen_ap(cpu))
+                       panic("AP #%d failed to start!", cpu);
+
+               CPU_SET(cpu, &all_cpus);        /* record AP in CPU map */
+       }
+
+       return (mp_naps);
+}
+
 /*
  * Functions to convert the "extra" parameters passed by Xen
  * into FreeBSD boot options (from the i386 Xen port).
diff --git a/sys/xen/pv.h b/sys/xen/pv.h
new file mode 100644
index 0000000..45b7473
--- /dev/null
+++ b/sys/xen/pv.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 Roger Pau Monné <roger.pau@xxxxxxxxxx>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef        __XEN_PV_H__
+#define        __XEN_PV_H__
+
+int    xen_pv_start_all_aps(void);
+
+#endif /* __XEN_PV_H__ */
-- 
1.7.7.5 (Apple Git-26)

