
[Xen-changelog] Ensure block/yield hypercalls always return a sane return code.



ChangeSet 1.1395, 2005/04/28 21:55:07+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Ensure block/yield hypercalls always return a sane return code.
        
        Ensure callers of __enter_scheduler take appropriate arch-specific
        action if no context switch occurs (callers from arch/x86 do not
        expect to return from a call into the scheduler).
        
        This fixes wildly unintuitive behaviour of do_block() for the
        VMX team.
        
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>



 arch/ia64/xenmisc.c     |    5 +++++
 arch/x86/domain.c       |    5 +++++
 arch/x86/x86_32/entry.S |    8 +++++++-
 arch/x86/x86_64/entry.S |    8 +++++++-
 common/schedule.c       |    4 +++-
 include/xen/sched.h     |    7 ++++++-
 6 files changed, 33 insertions(+), 4 deletions(-)
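
For context: do_block() and do_yield() may never return to their C
caller in the normal way.  Once __enter_scheduler() switches to another
domain, execution re-enters the guest via schedule_tail(), so whatever
the hypercall function would have returned is never written into the
guest's register frame.  A rough sketch of the blocking path (an
illustration with simplified names and bodies, not the exact Xen
source):

    /* Illustrative sketch only -- simplified from the 2005-era
     * xen/common/schedule.c; names and structure are approximate. */
    static long do_block(void)
    {
        struct exec_domain *ed = current;

        set_bit(EDF_BLOCKED, &ed->ed_flags);

        /* May context-switch away; if so, the guest resumes via
         * schedule_tail() and this C return value never reaches
         * the guest's EAX/RAX. */
        __enter_scheduler();

        return 0;
    }

Hence the do_arch_sched_op wrappers in the patch below: they pre-store
0 in the saved guest register slot before tail-calling do_sched_op(),
so the guest observes a sane success code on either exit path.
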


diff -Nru a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c   2005-04-28 17:02:27 -04:00
+++ b/xen/arch/ia64/xenmisc.c   2005-04-28 17:02:27 -04:00
@@ -278,6 +278,11 @@
        if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
 }
 
+void continue_running(struct exec_domain *same)
+{
+    /* nothing to do */
+}
+
 void panic_domain(struct pt_regs *regs, const char *fmt, ...)
 {
        va_list args;
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-04-28 17:02:27 -04:00
+++ b/xen/arch/x86/domain.c     2005-04-28 17:02:27 -04:00
@@ -794,7 +794,12 @@
     clear_bit(EDF_RUNNING, &prev->ed_flags);
 
     schedule_tail(next);
+    BUG();
+}
 
+void continue_running(struct exec_domain *same)
+{
+    schedule_tail(same);
     BUG();
 }
 
diff -Nru a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       2005-04-28 17:02:27 -04:00
+++ b/xen/arch/x86/x86_32/entry.S       2005-04-28 17:02:27 -04:00
@@ -652,6 +652,12 @@
         addl $16,%esp
         ret
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl %eax,%eax
+        movl %eax,UREGS_eax+4(%esp)
+        jmp  SYMBOL_NAME(do_sched_op)
+
 do_switch_vm86:
         # Discard the return address
         addl $4,%esp
@@ -718,7 +724,7 @@
         .long SYMBOL_NAME(do_stack_switch)
         .long SYMBOL_NAME(do_set_callbacks)
         .long SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
-        .long SYMBOL_NAME(do_sched_op)
+        .long SYMBOL_NAME(do_arch_sched_op)
         .long SYMBOL_NAME(do_dom0_op)
         .long SYMBOL_NAME(do_set_debugreg)
         .long SYMBOL_NAME(do_get_debugreg)
diff -Nru a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       2005-04-28 17:02:27 -04:00
+++ b/xen/arch/x86/x86_64/entry.S       2005-04-28 17:02:27 -04:00
@@ -523,6 +523,12 @@
         call  SYMBOL_NAME(do_nmi)
        jmp   restore_all_xen
 
+do_arch_sched_op:
+        # Ensure we return success even if we return via schedule_tail()
+        xorl  %eax,%eax
+        movq  %rax,UREGS_rax+8(%rsp)
+        jmp   SYMBOL_NAME(do_sched_op)
+
 .data
 
 ENTRY(exception_table)
@@ -554,7 +560,7 @@
         .quad SYMBOL_NAME(do_stack_switch)
         .quad SYMBOL_NAME(do_set_callbacks)
         .quad SYMBOL_NAME(do_fpu_taskswitch)     /*  5 */
-        .quad SYMBOL_NAME(do_sched_op)
+        .quad SYMBOL_NAME(do_arch_sched_op)
         .quad SYMBOL_NAME(do_dom0_op)
         .quad SYMBOL_NAME(do_set_debugreg)
         .quad SYMBOL_NAME(do_get_debugreg)
diff -Nru a/xen/common/schedule.c b/xen/common/schedule.c
--- a/xen/common/schedule.c     2005-04-28 17:02:27 -04:00
+++ b/xen/common/schedule.c     2005-04-28 17:02:27 -04:00
@@ -228,7 +228,9 @@
 
     /* Check for events /after/ blocking: avoids wakeup waiting race. */
     if ( event_pending(ed) )
+    {
         clear_bit(EDF_BLOCKED, &ed->ed_flags);
+    }
     else
     {
         TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->eid);
@@ -382,7 +384,7 @@
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
 
     if ( unlikely(prev == next) )
-        return;
+        return continue_running(prev);
     
     perfc_incrc(sched_ctx);
 
diff -Nru a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   2005-04-28 17:02:27 -04:00
+++ b/xen/include/xen/sched.h   2005-04-28 17:02:27 -04:00
@@ -210,7 +210,7 @@
     atomic_inc(&d->refcnt);
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
 }
-  
+
 extern struct domain *do_createdomain(
     domid_t dom_id, unsigned int cpu);
 extern int construct_dom0(
@@ -265,9 +265,14 @@
 extern void sync_lazy_execstate_all(void);
 extern int __sync_lazy_execstate(void);
 
+/* Called by the scheduler to switch to another exec_domain. */
 extern void context_switch(
     struct exec_domain *prev, 
     struct exec_domain *next);
+
+/* Called by the scheduler to continue running the current exec_domain. */
+extern void continue_running(
+    struct exec_domain *same);
 
 void domain_init(void);
 

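The second half of the change covers the no-switch case: when
__enter_scheduler() picks the same exec_domain again, it now calls
continue_running() instead of simply returning, and on x86 that routes
back through schedule_tail() (never returning to C), which is what the
arch/x86 callers expect.  From the guest's point of view the net effect
is that block/yield reliably return 0.  A hypothetical guest-side usage
(helper and constant names follow the Linux-on-Xen conventions of the
period; check the public headers for the exact interface):

    /* Hypothetical usage sketch -- not taken from this changeset. */
    long ret = HYPERVISOR_sched_op(SCHEDOP_yield);
    /* With this fix, ret is 0 on success whether or not an actual
     * context switch took place. */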