
[Xen-changelog] [xen-unstable] arm: use a per-VCPU stack



# HG changeset patch
# User Ian Campbell <Ian.Campbell@xxxxxxxxxx>
# Date 1329921202 0
# Node ID 4da12071496df60c68f0f496f58cf685e307f4f2
# Parent  40785b4790470b5180ded4e89e8c8b7919adb87c
arm: use a per-VCPU stack

We do not do any lazy state switching. Outside of context_switch() the
active stack is always that of the VCPU returned by current.
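
For illustration: each per-VCPU stack is STACK_SIZE bytes, STACK_SIZE
aligned, with a struct cpu_info at its top, so any sp within the stack can
recover it by masking. A minimal sketch of that layout, assuming an 8K
power-of-two stack; cpu_info_of() is a hypothetical helper name:

    #include <stdint.h>
    #include <stddef.h>

    #define STACK_SIZE (1UL << 13)      /* assumption: 8K, power of two */

    struct cpu_user_regs { uint32_t r0; /* ... remaining guest registers ... */ };

    struct cpu_info {                   /* mirrors asm-arm/current.h below */
        struct cpu_user_regs guest_cpu_user_regs;
        unsigned long elr;
        unsigned int processor_id;      /* valid iff this VCPU is current */
        unsigned long per_cpu_offset;
        unsigned int pad;
    };

    /* Mask sp down to the stack base, then index to the cpu_info that
     * vcpu_initialise() places at the top of every VCPU stack. */
    static inline struct cpu_info *cpu_info_of(unsigned long sp)
    {
        return (struct cpu_info *)((sp & ~(STACK_SIZE - 1))
                                   + STACK_SIZE - sizeof(struct cpu_info));
    }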

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
---


diff -r 40785b479047 -r 4da12071496d xen/arch/arm/asm-offsets.c
--- a/xen/arch/arm/asm-offsets.c        Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/arch/arm/asm-offsets.c        Wed Feb 22 14:33:22 2012 +0000
@@ -7,6 +7,7 @@
 
 #include <xen/config.h>
 #include <xen/types.h>
+#include <xen/sched.h>
 #include <public/xen.h>
 #include <asm/current.h>
 
@@ -65,7 +66,10 @@
    BLANK();
 
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
+
+   OFFSET(VCPU_arch_saved_context, struct vcpu, arch.saved_context);
 }
+
 /*
  * Local variables:
  * mode: C
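
OFFSET() here follows the usual asm-offsets pattern: this file is compiled
to assembly and the constants are scraped into a generated header, so that
entry.S can use VCPU_arch_saved_context as an immediate. A sketch of one
common form of the macros; the exact Xen definitions may differ:

    #include <stddef.h>

    /* Emit a marker into the generated .s file; a build-time script
     * rewrites each marker as "#define sym value". */
    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"==>" #sym " %0<==\"" : : "i" (val) )
    #define OFFSET(sym, type, member) DEFINE(sym, offsetof(type, member))
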
diff -r 40785b479047 -r 4da12071496d xen/arch/arm/domain.c
--- a/xen/arch/arm/domain.c     Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/arch/arm/domain.c     Wed Feb 22 14:33:22 2012 +0000
@@ -44,7 +44,7 @@
 
 static void ctxt_switch_from(struct vcpu *p)
 {
-
+    context_saved(p);
 }
 
 static void ctxt_switch_to(struct vcpu *n)
@@ -52,52 +52,36 @@
     p2m_load_VTTBR(n->domain);
 }
 
-static void __context_switch(void)
+static void schedule_tail(struct vcpu *prev)
 {
-    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
-    unsigned int          cpu = smp_processor_id();
-    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
-    struct vcpu          *n = current;
+    /* Re-enable interrupts before restoring state which may fault. */
+    local_irq_enable();
 
-    ASSERT(p != n);
-    ASSERT(cpumask_empty(n->vcpu_dirty_cpumask));
+    ctxt_switch_from(prev);
 
-    if ( !is_idle_vcpu(p) )
-    {
-        memcpy(&p->arch.user_regs, stack_regs, CTXT_SWITCH_STACK_BYTES);
-        ctxt_switch_from(p);
-    }
-
-    if ( !is_idle_vcpu(n) )
-    {
-        memcpy(stack_regs, &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES);
-        ctxt_switch_to(n);
-    }
-
-    per_cpu(curr_vcpu, cpu) = n;
-
+    /* TODO
+       update_runstate_area(current);
+    */
+    ctxt_switch_to(current);
 }
 
-static void schedule_tail(struct vcpu *v)
+static void continue_new_vcpu(struct vcpu *prev)
 {
-    if ( is_idle_vcpu(v) )
-        continue_idle_domain(v);
+    schedule_tail(prev);
+
+    if ( is_idle_vcpu(current) )
+        continue_idle_domain(current);
     else
-        continue_nonidle_domain(v);
+        continue_nonidle_domain(current);
 }
 
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
-    unsigned int cpu = smp_processor_id();
-
     ASSERT(local_irq_is_enabled());
-
-    printk("context switch %d:%d%s -> %d:%d%s\n",
-           prev->domain->domain_id, prev->vcpu_id, is_idle_vcpu(prev) ? " (idle)" : "",
-           next->domain->domain_id, next->vcpu_id, is_idle_vcpu(next) ? " (idle)" : "");
+    ASSERT(prev != next);
+    ASSERT(cpumask_empty(next->vcpu_dirty_cpumask));
 
     /* TODO
-       if (prev != next)
        update_runstate_area(prev);
     */
 
@@ -105,60 +89,19 @@
 
     set_current(next);
 
-    if ( (per_cpu(curr_vcpu, cpu) == next) ||
-         (is_idle_vcpu(next) && cpu_online(cpu)) )
-    {
-        local_irq_enable();
-    }
-    else
-    {
-        __context_switch();
+    prev = __context_switch(prev, next);
 
-        /* Re-enable interrupts before restoring state which may fault. */
-        local_irq_enable();
-    }
-
-    context_saved(prev);
-
-    /* TODO
-       if (prev != next)
-       update_runstate_area(next);
-    */
-
-    schedule_tail(next);
-    BUG();
-
+    schedule_tail(prev);
 }
 
 void continue_running(struct vcpu *same)
 {
-    schedule_tail(same);
-    BUG();
-}
-
-int __sync_local_execstate(void)
-{
-    unsigned long flags;
-    int switch_required;
-
-    local_irq_save(flags);
-
-    switch_required = (this_cpu(curr_vcpu) != current);
-
-    if ( switch_required )
-    {
-        ASSERT(current == idle_vcpu[smp_processor_id()]);
-        __context_switch();
-    }
-
-    local_irq_restore(flags);
-
-    return switch_required;
+    /* Nothing to do */
 }
 
 void sync_local_execstate(void)
 {
-    (void)__sync_local_execstate();
+    /* Nothing to do -- no lazy switching */
 }
 
 void startup_cpu_idle_loop(void)
@@ -213,6 +156,18 @@
 {
     int rc = 0;
 
+    v->arch.stack = alloc_xenheap_pages(STACK_ORDER, MEMF_node(vcpu_to_node(v)));
+    if ( v->arch.stack == NULL )
+        return -ENOMEM;
+
+    v->arch.cpu_info = (struct cpu_info *)(v->arch.stack
+                                           + STACK_SIZE
+                                           - sizeof(struct cpu_info));
+
+    memset(&v->arch.saved_context, 0, sizeof(v->arch.saved_context));
+    v->arch.saved_context.sp = (uint32_t)v->arch.cpu_info;
+    v->arch.saved_context.pc = (uint32_t)continue_new_vcpu;
+
     if ( (rc = vcpu_vgic_init(v)) != 0 )
         return rc;
 
@@ -224,7 +179,7 @@
 
 void vcpu_destroy(struct vcpu *v)
 {
-
+    free_xenheap_pages(v->arch.stack, STACK_ORDER);
 }
 
 int arch_domain_create(struct domain *d, unsigned int domcr_flags)
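
The bootstrap is the interesting part here: vcpu_initialise() primes
saved_context.sp/pc so that the very first __context_switch() to a new
VCPU "returns" into continue_new_vcpu() on the fresh stack, with prev
still in r0 and therefore arriving as the function's argument. A toy
sketch of that priming idea, using a plain function pointer in place of
the real saved pc:

    #include <stdio.h>

    /* Toy stand-in for saved_context: a resume address plus a stack. */
    struct toy_context {
        void (*pc)(void *prev);        /* plays the role of saved pc */
        void *sp;                      /* plays the role of saved sp */
    };

    static void toy_continue_new_vcpu(void *prev)
    {
        printf("first run; prev=%p\n", prev);
    }

    /* The real __context_switch loads sp/pc with ldmia and leaves r0
     * (prev) untouched; calling through the primed pc with prev as the
     * argument models the same hand-off. */
    static void toy_switch_to(struct toy_context *next, void *prev)
    {
        next->pc(prev);
    }

    int main(void)
    {
        struct toy_context fresh = { toy_continue_new_vcpu, NULL };
        toy_switch_to(&fresh, (void *)0x1);    /* dummy prev */
        return 0;
    }
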
diff -r 40785b479047 -r 4da12071496d xen/arch/arm/domain_build.c
--- a/xen/arch/arm/domain_build.c       Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/arch/arm/domain_build.c       Wed Feb 22 14:33:22 2012 +0000
@@ -5,6 +5,7 @@
 #include <xen/domain_page.h>
 #include <xen/sched.h>
 #include <asm/irq.h>
+#include <asm/regs.h>
 
 #include "gic.h"
 #include "kernel.h"
@@ -71,7 +72,7 @@
     int rc;
 
     struct vcpu *v = d->vcpu[0];
-    struct cpu_user_regs *regs = &v->arch.user_regs;
+    struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;
 
     /* Sanity! */
     BUG_ON(d->domain_id != 0);
diff -r 40785b479047 -r 4da12071496d xen/arch/arm/entry.S
--- a/xen/arch/arm/entry.S      Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/arch/arm/entry.S      Wed Feb 22 14:33:22 2012 +0000
@@ -105,3 +105,19 @@
        pop {r0-r12}
        add sp, #(UREGS_R8_fiq - UREGS_sp); /* SP, LR, SPSR, PC */
        eret
+
+/*
+ * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
+ *
+ * r0 - prev
+ * r1 - next
+ *
+ * Returns prev in r0
+ */
+ENTRY(__context_switch)
+       add     ip, r0, #VCPU_arch_saved_context
+       stmia   ip!, {r4 - sl, fp, sp, lr}      /* Save register state */
+
+       add     r4, r1, #VCPU_arch_saved_context
+       ldmia   r4, {r4 - sl, fp, sp, pc}       /* Load registers and return */
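
Two details make this tiny switch sufficient. First, only the AAPCS
callee-saved registers plus sp need saving; r0-r3 and ip are caller-saved,
so a C caller of __context_switch() already expects them to be clobbered.
Second, stmia stores lr into the slot that ldmia later loads into pc, so a
normal switch resumes at the caller's return address, while a never-run
VCPU resumes at continue_new_vcpu(). An annotated mirror of the
saved_context layout (see asm-arm/domain.h below):

    #include <stdint.h>

    /* stmia fills the fields in this order; the saved lr lands in the
     * pc slot, so "ldmia ..., pc" returns through it. */
    struct saved_context_annotated {
        uint32_t r4, r5, r6, r7, r8, r9;
        uint32_t sl;    /* r10 */
        uint32_t fp;    /* r11, frame pointer */
        uint32_t sp;    /* r13, stack to resume on */
        uint32_t pc;    /* saved lr: resume address, or
                           continue_new_vcpu for a never-run VCPU */
    };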
+
diff -r 40785b479047 -r 4da12071496d xen/arch/arm/setup.c
--- a/xen/arch/arm/setup.c      Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/arch/arm/setup.c      Wed Feb 22 14:33:22 2012 +0000
@@ -42,7 +42,7 @@
 static unsigned int __initdata max_cpus = NR_CPUS;
 
 /* Xen stack for bringing up the first CPU. */
-unsigned char init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE)));
+unsigned char __initdata init_stack[STACK_SIZE] __attribute__((__aligned__(STACK_SIZE)));
 
 extern char __init_begin[], __init_end[], __bss_start[];
 
@@ -61,7 +61,6 @@
 {
         scheduler_init();
         set_current(idle_vcpu[0]);
-        this_cpu(curr_vcpu) = current;
         /* TODO: setup_idle_pagetable(); */
 }
 
@@ -175,7 +174,7 @@
     console_init_preirq();
 #endif
 
-    set_current((struct vcpu *)0xfffff000); /* debug sanity */
+    __set_current((struct vcpu *)0xfffff000); /* debug sanity */
     idle_vcpu[0] = current;
     set_processor_id(0); /* needed early, for smp_processor_id() */
 
diff -r 40785b479047 -r 4da12071496d xen/include/asm-arm/current.h
--- a/xen/include/asm-arm/current.h     Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/include/asm-arm/current.h     Wed Feb 22 14:33:22 2012 +0000
@@ -5,50 +5,55 @@
 #include <xen/percpu.h>
 #include <public/xen.h>
 
+#include <asm/percpu.h>
+
 #ifndef __ASSEMBLY__
 
 struct vcpu;
 
+/*
+ * Which VCPU is "current" on this PCPU.
+ */
+DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
+
 struct cpu_info {
     struct cpu_user_regs guest_cpu_user_regs;
     unsigned long elr;
+    /* The following are valid iff this VCPU is current */
     unsigned int processor_id;
-    struct vcpu *current_vcpu;
     unsigned long per_cpu_offset;
+    unsigned int pad;
 };
 
 static inline struct cpu_info *get_cpu_info(void)
 {
-        register unsigned long sp asm ("sp");
-        return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + STACK_SIZE - sizeof(struct cpu_info));
+    register unsigned long sp asm ("sp");
+    return (struct cpu_info *)((sp & ~(STACK_SIZE - 1)) + STACK_SIZE - sizeof(struct cpu_info));
 }
 
-#define get_current()         (get_cpu_info()->current_vcpu)
-#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
-#define current               (get_current())
-
 #define get_processor_id()    (get_cpu_info()->processor_id)
 #define set_processor_id(id)  do {                                      \
     struct cpu_info *ci__ = get_cpu_info();                             \
     ci__->per_cpu_offset = __per_cpu_offset[ci__->processor_id = (id)]; \
 } while (0)
 
+#define get_current()         (this_cpu(curr_vcpu))
+#define __set_current(vcpu)   (this_cpu(curr_vcpu) = (vcpu))
+#define set_current(vcpu)     do {                                      \
+    vcpu->arch.cpu_info->processor_id = get_processor_id();             \
+    __set_current(vcpu);                                                \
+} while (0)
+#define current               (get_current())
+
 #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
 
 #define reset_stack_and_jump(__fn)              \
     __asm__ __volatile__ (                      \
         "mov sp,%0; b "STR(__fn)      \
         : : "r" (guest_cpu_user_regs()) : "memory" )
+
 #endif
 
-
-/*
- * Which VCPU's state is currently running on each CPU?
- * This is not necessarily the same as 'current' as a CPU may be
- * executing a lazy state switch.
- */
-DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
-
 #endif /* __ARM_CURRENT_H__ */
 /*
  * Local variables:
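
The ordering inside set_current() matters: processor_id lives at the top
of the incoming VCPU's stack and is only valid while that VCPU is current,
so it must be stamped with this physical CPU's id before the VCPU is
published as current and its stack is switched to. The same macro written
out as a sketch function (uses this header's declarations; not a drop-in
replacement):

    static inline void set_current_sketch(struct vcpu *vcpu)
    {
        /* 1. Make the new stack's cpu_info describe this physical CPU,
         *    so get_processor_id() works once sp moves onto it. */
        vcpu->arch.cpu_info->processor_id = get_processor_id();
        /* 2. Only then publish vcpu as this CPU's current. */
        this_cpu(curr_vcpu) = vcpu;
    }
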
diff -r 40785b479047 -r 4da12071496d xen/include/asm-arm/domain.h
--- a/xen/include/asm-arm/domain.h      Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/include/asm-arm/domain.h      Wed Feb 22 14:33:22 2012 +0000
@@ -47,7 +47,26 @@
 
 struct arch_vcpu
 {
-    struct cpu_user_regs user_regs;
+    struct {
+        uint32_t    r4;
+        uint32_t    r5;
+        uint32_t    r6;
+        uint32_t    r7;
+        uint32_t    r8;
+        uint32_t    r9;
+        uint32_t    sl;
+        uint32_t    fp;
+        uint32_t    sp;
+        uint32_t    pc;
+    } saved_context;
+
+    void *stack;
+
+    /*
+     * Points into ->stack, more convenient than doing pointer arith
+     * all the time.
+     */
+    struct cpu_info *cpu_info;
 
     uint32_t sctlr;
     uint32_t ttbr0, ttbr1, ttbcr;
diff -r 40785b479047 -r 4da12071496d xen/include/asm-arm/regs.h
--- a/xen/include/asm-arm/regs.h        Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/include/asm-arm/regs.h        Wed Feb 22 14:33:22 2012 +0000
@@ -28,9 +28,7 @@
     (diff == 0);                                                              \
 })
 
-#define return_reg(v) ((v)->arch.user_regs.r0)
-
-#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
+#define return_reg(v) ((v)->arch.cpu_info->guest_cpu_user_regs.r0)
 
 #endif /* __ARM_REGS_H__ */
 /*
diff -r 40785b479047 -r 4da12071496d xen/include/asm-arm/system.h
--- a/xen/include/asm-arm/system.h      Wed Feb 22 14:27:18 2012 +0000
+++ b/xen/include/asm-arm/system.h      Wed Feb 22 14:33:22 2012 +0000
@@ -191,6 +191,8 @@
     return !!(flags & PSR_FIQ_MASK);
 }
 
+extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
+
 #endif
 /*
  * Local variables:
