
[Xen-changelog] [xen-unstable] [XEN] Scheduling hypercalls defer entry to the scheduler to softirq



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID cf98903ebb22f5a5aae73c56912bf936ab5787da
# Parent  5f7b5e5ca14b6c00d8ac23d1a2ece62fcbaebc03
[XEN] Scheduling hypercalls defer entry to the scheduler to softirq
context.

This avoids entering the scheduler with outstanding multicall state
and also happens to simplify the x86 entry protocol to the scheduler
hypercall (since we do not need to preset the return code).

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_support.c |    3 +--
 xen/arch/x86/x86_32/entry.S     |   22 ++++------------------
 xen/arch/x86/x86_64/entry.S     |   24 +++++-------------------
 xen/common/domain.c             |   14 ++++++++++++--
 xen/common/schedule.c           |   17 ++++++++---------
 5 files changed, 30 insertions(+), 50 deletions(-)
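
The heart of the change is a control-flow inversion: scheduling hypercalls no
longer call into the scheduler directly, they record their effect (block,
poll, yield) and raise SCHEDULE_SOFTIRQ, so the scheduler runs later, from
softirq context, after the hypercall has returned through the normal exit
path. A minimal sketch of the new shape of such a handler (it mirrors the
do_yield() hunk further down; the comments are illustrative, not part of the
patch):

    /*
     * Before: hypercall -> __enter_scheduler()  (context switch inside the
     *         hypercall, so the return code had to be preset in guest regs).
     * After:  hypercall -> raise_softirq(SCHEDULE_SOFTIRQ); return 0;
     *         ... exit path -> do_softirq() -> schedule().
     */
    static long do_yield(void)
    {
        raise_softirq(SCHEDULE_SOFTIRQ);   /* defer the reschedule */
        return 0;                          /* ordinary return path sets the rc */
    }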

diff -r 5f7b5e5ca14b -r cf98903ebb22 xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c   Thu Nov 16 17:07:23 2006 +0000
+++ b/xen/arch/ia64/vmx/vmx_support.c   Thu Nov 16 18:28:05 2006 +0000
@@ -95,8 +95,7 @@ void vmx_send_assist_req(struct vcpu *v)
             break;
         }
 
-        /* I want to call __enter_scheduler() only */
-        do_sched_op_compat(SCHEDOP_yield, 0);
+        raise_softirq(SCHEDULE_SOFTIRQ);
         mb();
     }
 
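
In this ia64 VMX assist loop the old code issued a compat yield hypercall
purely as a way of reaching the scheduler; raising the softirq states that
intent directly. A hedged sketch of the resulting wait pattern (the loop
condition is a placeholder, not the real VMX test):

    /* Wait until the assist request completes: ask for a reschedule and
     * rely on the softirq being serviced before the vCPU re-enters the
     * guest; mb() keeps the re-test ordered against the reschedule. */
    while ( !assist_req_done(v) )          /* hypothetical predicate */
    {
        raise_softirq(SCHEDULE_SOFTIRQ);
        mb();
    }
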
diff -r 5f7b5e5ca14b -r cf98903ebb22 xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Thu Nov 16 17:07:23 2006 +0000
+++ b/xen/arch/x86/x86_32/entry.S       Thu Nov 16 18:28:05 2006 +0000
@@ -597,20 +597,6 @@ ENTRY(setup_vm86_frame)
         addl $16,%esp
         ret
 
-do_arch_sched_op_compat:
-        # Ensure we return success even if we return via schedule_tail()
-        xorl %eax,%eax
-        GET_GUEST_REGS(%ecx)
-        movl %eax,UREGS_eax(%ecx)
-        jmp  do_sched_op_compat
-
-do_arch_sched_op:
-        # Ensure we return success even if we return via schedule_tail()
-        xorl %eax,%eax
-        GET_GUEST_REGS(%ecx)
-        movl %eax,UREGS_eax(%ecx)
-        jmp  do_sched_op
-
 .data
 
 ENTRY(exception_table)
@@ -642,7 +628,7 @@ ENTRY(hypercall_table)
         .long do_stack_switch
         .long do_set_callbacks
         .long do_fpu_taskswitch     /*  5 */
-        .long do_arch_sched_op_compat
+        .long do_sched_op_compat
         .long do_platform_op
         .long do_set_debugreg
         .long do_get_debugreg
@@ -665,7 +651,7 @@ ENTRY(hypercall_table)
         .long do_mmuext_op
         .long do_acm_op
         .long do_nmi_op
-        .long do_arch_sched_op
+        .long do_sched_op
         .long do_callback_op        /* 30 */
         .long do_xenoprof_op
         .long do_event_channel_op
@@ -684,7 +670,7 @@ ENTRY(hypercall_args_table)
         .byte 2 /* do_stack_switch      */
         .byte 4 /* do_set_callbacks     */
         .byte 1 /* do_fpu_taskswitch    */  /*  5 */
-        .byte 2 /* do_arch_sched_op_compat */
+        .byte 2 /* do_sched_op_compat   */
         .byte 1 /* do_platform_op       */
         .byte 2 /* do_set_debugreg      */
         .byte 1 /* do_get_debugreg      */
@@ -707,7 +693,7 @@ ENTRY(hypercall_args_table)
         .byte 4 /* do_mmuext_op         */
         .byte 1 /* do_acm_op            */
         .byte 2 /* do_nmi_op            */
-        .byte 2 /* do_arch_sched_op     */
+        .byte 2 /* do_sched_op          */
         .byte 2 /* do_callback_op       */  /* 30 */
         .byte 2 /* do_xenoprof_op       */
         .byte 2 /* do_event_channel_op  */
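
The deleted do_arch_sched_op{,_compat} stubs existed only because the old
path could context-switch inside the hypercall and later resume the guest
via schedule_tail(), never unwinding back to store a return value; the stubs
therefore pre-wrote 0 into the saved guest %eax. Roughly, in C (illustrative
only; guest_cpu_user_regs() is the usual accessor for the saved frame):

    /* Approximate C equivalent of the removed assembly stub: preset the
     * saved guest %eax to 0 (success) before possibly scheduling away. */
    long do_arch_sched_op(int cmd, XEN_GUEST_HANDLE(void) arg)
    {
        struct cpu_user_regs *regs = guest_cpu_user_regs();
        regs->eax = 0;                /* value seen if we resume via schedule_tail() */
        return do_sched_op(cmd, arg); /* might never return on the old path */
    }

With the scheduler deferred to softirq context, do_sched_op() always returns
through the normal hypercall exit path, so the plain handlers can be wired
into the table directly.
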
diff -r 5f7b5e5ca14b -r cf98903ebb22 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Thu Nov 16 17:07:23 2006 +0000
+++ b/xen/arch/x86/x86_64/entry.S       Thu Nov 16 18:28:05 2006 +0000
@@ -497,20 +497,6 @@ nmi_in_hypervisor_mode:
         call  do_nmi
         jmp   ret_from_intr
 
-do_arch_sched_op_compat:
-        # Ensure we return success even if we return via schedule_tail()
-        xorl  %eax,%eax
-        GET_GUEST_REGS(%r10)
-        movq  %rax,UREGS_rax(%r10)
-        jmp   do_sched_op_compat
-
-do_arch_sched_op:
-        # Ensure we return success even if we return via schedule_tail()
-        xorl  %eax,%eax
-        GET_GUEST_REGS(%r10)
-        movq  %rax,UREGS_rax(%r10)
-        jmp   do_sched_op
-
 .data
 
 ENTRY(exception_table)
@@ -542,7 +528,7 @@ ENTRY(hypercall_table)
         .quad do_stack_switch
         .quad do_set_callbacks
         .quad do_fpu_taskswitch     /*  5 */
-        .quad do_arch_sched_op_compat
+        .quad do_sched_op_compat
         .quad do_platform_op
         .quad do_set_debugreg
         .quad do_get_debugreg
@@ -565,7 +551,7 @@ ENTRY(hypercall_table)
         .quad do_mmuext_op
         .quad do_acm_op
         .quad do_nmi_op
-        .quad do_arch_sched_op
+        .quad do_sched_op
         .quad do_callback_op        /* 30 */
         .quad do_xenoprof_op
         .quad do_event_channel_op
@@ -584,8 +570,8 @@ ENTRY(hypercall_args_table)
         .byte 2 /* do_stack_switch      */
         .byte 3 /* do_set_callbacks     */
         .byte 1 /* do_fpu_taskswitch    */  /*  5 */
-        .byte 2 /* do_arch_sched_op_compat */
-        .byte 1 /* do_platform_op           */
+        .byte 2 /* do_sched_op_compat   */
+        .byte 1 /* do_platform_op       */
         .byte 2 /* do_set_debugreg      */
         .byte 1 /* do_get_debugreg      */
         .byte 2 /* do_update_descriptor */  /* 10 */
@@ -607,7 +593,7 @@ ENTRY(hypercall_args_table)
         .byte 4 /* do_mmuext_op         */
         .byte 1 /* do_acm_op            */
         .byte 2 /* do_nmi_op            */
-        .byte 2 /* do_arch_sched_op     */
+        .byte 2 /* do_sched_op          */
         .byte 2 /* do_callback_op       */  /* 30 */
         .byte 2 /* do_xenoprof_op       */
         .byte 2 /* do_event_channel_op  */
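
The x86_64 hunks are the same substitution. The two tables they touch pair
each hypercall number with its handler (hypercall_table) and with the number
of guest argument registers it consumes (hypercall_args_table). A conceptual
C sketch of that table-driven dispatch (the real dispatch lives in entry.S;
NR_hypercalls and the handler typedef are named here only for illustration):

    typedef long (*hypercall_handler_t)(unsigned long, unsigned long,
                                        unsigned long, unsigned long,
                                        unsigned long);
    extern hypercall_handler_t hypercall_table[];  /* matches the .quad entries */
    extern unsigned char hypercall_args_table[];   /* matches the .byte entries */

    long dispatch_hypercall(unsigned int nr, const unsigned long arg[5])
    {
        if ( nr >= NR_hypercalls )                 /* assumed table bound */
            return -ENOSYS;
        return hypercall_table[nr](arg[0], arg[1], arg[2], arg[3], arg[4]);
    }
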
diff -r 5f7b5e5ca14b -r cf98903ebb22 xen/common/domain.c
--- a/xen/common/domain.c       Thu Nov 16 17:07:23 2006 +0000
+++ b/xen/common/domain.c       Thu Nov 16 18:28:05 2006 +0000
@@ -258,8 +258,18 @@ void __domain_crash_synchronous(void)
 {
     __domain_crash(current->domain);
 
-    /* Flush multicall state before dying. */
-    this_cpu(mc_state).flags = 0;
+    /*
+     * Flush multicall state before dying if a multicall is in progress.
+     * This shouldn't be necessary, but some architectures are calling
+     * domain_crash_synchronous() when they really shouldn't (i.e., from
+     * within hypercall context).
+     */
+    if ( this_cpu(mc_state).flags != 0 )
+    {
+        dprintk(XENLOG_ERR,
+                "FIXME: synchronous domain crash during a multicall!\n");
+        this_cpu(mc_state).flags = 0;
+    }
 
     for ( ; ; )
         do_softirq();
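
The domain.c hunk guards an invariant the softirq deferral now depends on:
the per-CPU multicall state (this_cpu(mc_state).flags) must be clear whenever
the scheduler can run, which is exactly what the new ASSERT in schedule()
below checks. A simplified sketch of how the multicall path maintains that
invariant (abbreviated; error handling and guest copies omitted):

    /* this_cpu(mc_state).flags is non-zero only while a batch is being
     * processed, and is cleared again before anything may legitimately
     * enter the scheduler. */
    struct mc_state *mcs = &this_cpu(mc_state);

    mcs->flags = MCSF_in_multicall;
    for ( i = 0; i < nr_calls; i++ )
        do_multicall_call(&mcs->call);    /* run one batched hypercall */
    mcs->flags = 0;

Crashing a domain synchronously from inside such a batch would leave the
flags set, hence the dprintk rather than a silent reset.
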
diff -r 5f7b5e5ca14b -r cf98903ebb22 xen/common/schedule.c
--- a/xen/common/schedule.c     Thu Nov 16 17:07:23 2006 +0000
+++ b/xen/common/schedule.c     Thu Nov 16 18:28:05 2006 +0000
@@ -60,8 +60,6 @@ static struct scheduler *schedulers[] = 
     NULL
 };
 
-static void __enter_scheduler(void);
-
 static struct scheduler ops;
 
 #define SCHED_OP(fn, ...)                                 \
@@ -270,7 +268,7 @@ static long do_block(void)
     else
     {
         TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
-        __enter_scheduler();
+        raise_softirq(SCHEDULE_SOFTIRQ);
     }
 
     return 0;
@@ -315,9 +313,9 @@ static long do_poll(struct sched_poll *s
         set_timer(&v->poll_timer, sched_poll->timeout);
 
     TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
-    __enter_scheduler();
-
-    stop_timer(&v->poll_timer);
+    raise_softirq(SCHEDULE_SOFTIRQ);
+
+    return 0;
 
  out:
     clear_bit(_VCPUF_polling, &v->vcpu_flags);
@@ -329,7 +327,7 @@ static long do_yield(void)
 static long do_yield(void)
 {
     TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
-    __enter_scheduler();
+    raise_softirq(SCHEDULE_SOFTIRQ);
     return 0;
 }
 
@@ -540,7 +538,7 @@ long sched_adjust(struct domain *d, stru
  * - deschedule the current domain (scheduler independent).
  * - pick a new domain (scheduler dependent).
  */
-static void __enter_scheduler(void)
+static void schedule(void)
 {
     struct vcpu          *prev = current, *next = NULL;
     s_time_t              now = NOW();
@@ -549,6 +547,7 @@ static void __enter_scheduler(void)
     s32                   r_time;     /* time for new dom to run */
 
     ASSERT(!in_irq());
+    ASSERT(this_cpu(mc_state).flags == 0);
 
     perfc_incrc(sched_run);
 
@@ -679,7 +678,7 @@ void __init scheduler_init(void)
 {
     int i;
 
-    open_softirq(SCHEDULE_SOFTIRQ, __enter_scheduler);
+    open_softirq(SCHEDULE_SOFTIRQ, schedule);
 
     for_each_cpu ( i )
     {
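
With __enter_scheduler() renamed to schedule() and registered through
open_softirq(), the context switch now happens when pending softirqs are
serviced on the way back out to the guest. A simplified sketch of that
servicing loop, modelled on the generic softirq code (details approximate):

    /* Drain the per-CPU pending mask and call each registered handler;
     * SCHEDULE_SOFTIRQ now maps to schedule(), so the reschedule requested
     * by the hypercall happens here, outside hypercall/multicall context. */
    void do_softirq(void)
    {
        unsigned int i, cpu = smp_processor_id();
        unsigned long pending;

        while ( (pending = softirq_pending(cpu)) != 0 )
        {
            i = find_first_set_bit(pending);
            clear_bit(i, &softirq_pending(cpu));
            (*softirq_handlers[i])();
        }
    }

Because the handler only runs once the hypercall (and any enclosing multicall
batch) has fully completed, the new ASSERT in schedule() holds and the x86
return-code presetting stubs become unnecessary.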

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

