[Xen-changelog] Clean up and fix domain_pause and friends. Synchronous
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 83c73802f02a0841d3ad697273de76654527f24b
# Parent  9312a3e8a6f822db5ac5c3502334a8fa08461572

Clean up and fix domain_pause and friends. Synchronous pause should not
only wait for the running flag to clear, but also for the scheduler lock
to be released. Also get rid of some unused sync_lazy_execstate functions.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
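To see what the synchronous guarantee buys a caller, consider a minimal
sketch (illustrative only, not part of the patch; the domid variable and
the find_domain_by_id()/put_domain() lookup-and-release pattern are
assumptions for the example). Once domain_pause() returns, every VCPU of
the target domain has been fully descheduled, so its saved state can be
read or rewritten without racing the scheduler:

    /* Hypothetical caller -- illustrates the guarantee, not new API. */
    struct domain *d = find_domain_by_id(domid);
    if ( d != NULL )
    {
        domain_pause(d);      /* returns only once no VCPU of d is running */
        /* ... safely inspect or modify d's saved VCPU state here ... */
        domain_unpause(d);    /* allow the VCPUs to be scheduled again */
        put_domain(d);        /* drop the reference taken by the lookup */
    }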
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/linux-xen/irq_ia64.c
--- a/xen/arch/ia64/linux-xen/irq_ia64.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c        Fri Aug 26 09:29:54 2005
@@ -265,7 +265,7 @@
          */
         vmx_irq_exit();
         if ( wake_dom0 && current != dom0 )
-                domain_wake(dom0->vcpu[0]);
+                vcpu_wake(dom0->vcpu[0]);
 }
 #endif
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c        Fri Aug 26 09:29:54 2005
@@ -116,7 +116,7 @@
 + */
 +        vmx_irq_exit();
 +        if ( wake_dom0 && current != dom0 )
-+                domain_wake(dom0->vcpu[0]);
++                vcpu_wake(dom0->vcpu[0]);
 +}
 +#endif
 +
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
--- a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c        Fri Aug 26 09:29:54 2005
@@ -73,7 +73,7 @@
 +#endif
 +        //FIXME: TEMPORARY HACK!!!!
 +        vcpu_pend_interrupt(dom0->vcpu[0],vector);
-+        domain_wake(dom0->vcpu[0]);
++        vcpu_wake(dom0->vcpu[0]);
 +        }
 +        else
 +#endif
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/patch/linux-2.6.7/time.c
--- a/xen/arch/ia64/patch/linux-2.6.7/time.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/patch/linux-2.6.7/time.c        Fri Aug 26 09:29:54 2005
@@ -209,14 +209,14 @@
 +        if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
 +                vcpu_pend_timer(dom0->vcpu[0]);
 +                //vcpu_set_next_timer(dom0->vcpu[0]);
-+                domain_wake(dom0->vcpu[0]);
++                vcpu_wake(dom0->vcpu[0]);
 +        }
 +        if (!is_idle_task(current->domain) && current->domain != dom0) {
 +                if (vcpu_timer_expired(current)) {
 +                        vcpu_pend_timer(current);
 +                        // ensure another timer interrupt happens even if domain doesn't
 +                        vcpu_set_next_timer(current);
-+                        domain_wake(current);
++                        vcpu_wake(current);
 +                }
 +        }
 +        raise_actimer_softirq();
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/xenirq.c
--- a/xen/arch/ia64/xenirq.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/xenirq.c        Fri Aug 26 09:29:54 2005
@@ -50,7 +50,7 @@
 #endif
         //FIXME: TEMPORARY HACK!!!!
         vcpu_pend_interrupt(dom0->vcpu[0],vector);
-        domain_wake(dom0->vcpu[0]);
+        vcpu_wake(dom0->vcpu[0]);
         return(1);
         }
         return(0);
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/xenmisc.c        Fri Aug 26 09:29:54 2005
@@ -59,8 +59,6 @@
 /* calls in xen/common code that are unused on ia64 */
 void sync_lazy_execstate_cpu(unsigned int cpu) {}
-void sync_lazy_execstate_mask(cpumask_t mask) {}
-void sync_lazy_execstate_all(void) {}
 
 #ifdef CONFIG_VTI
 int grant_table_create(struct domain *d) { return 0; }
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/ia64/xentime.c
--- a/xen/arch/ia64/xentime.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/ia64/xentime.c        Fri Aug 26 09:29:54 2005
@@ -162,14 +162,14 @@
         if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
                 vcpu_pend_timer(dom0->vcpu[0]);
                 //vcpu_set_next_timer(dom0->vcpu[0]);
-                domain_wake(dom0->vcpu[0]);
+                vcpu_wake(dom0->vcpu[0]);
         }
         if (!is_idle_task(current->domain) && current->domain != dom0) {
                 if (vcpu_timer_expired(current)) {
                         vcpu_pend_timer(current);
                         // ensure another timer interrupt happens even if domain doesn't
                         vcpu_set_next_timer(current);
-                        domain_wake(current);
+                        vcpu_wake(current);
                 }
         }
         raise_actimer_softirq();
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/x86/audit.c
--- a/xen/arch/x86/audit.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/x86/audit.c        Fri Aug 26 09:29:54 2005
@@ -735,7 +735,6 @@
 
     if ( d != current->domain )
         domain_pause(d);
-    sync_lazy_execstate_all();
 
     // Maybe we should just be using BIGLOCK?
     //
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c        Fri Aug 26 09:05:43 2005
+++ b/xen/arch/x86/domain.c        Fri Aug 26 09:29:54 2005
@@ -888,24 +888,14 @@
 void sync_lazy_execstate_cpu(unsigned int cpu)
 {
     if ( cpu == smp_processor_id() )
+    {
         (void)__sync_lazy_execstate();
+    }
     else
+    {
+        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
         flush_tlb_mask(cpumask_of_cpu(cpu));
-}
-
-void sync_lazy_execstate_mask(cpumask_t mask)
-{
-    if ( cpu_isset(smp_processor_id(), mask) )
-        (void)__sync_lazy_execstate();
-    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
-    flush_tlb_mask(mask);
-}
-
-void sync_lazy_execstate_all(void)
-{
-    __sync_lazy_execstate();
-    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
-    flush_tlb_mask(cpu_online_map);
+    }
 }
 
 unsigned long __hypercall_create_continuation(
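A note on the xen/arch/x86/domain.c hunk above: x86 Xen can switch to the
idle domain lazily, leaving the outgoing VCPU's state resident on that CPU
until __sync_lazy_execstate() commits it. With the mask/all variants
removed, callers now name the one CPU that may still hold such state. A
condensed sketch of the calling pattern (it mirrors the vcpu_sleep_sync()
hunk later in this patch; v is assumed to be the VCPU of interest):

    /* Flush v's lazily-retained state. Locally this calls
     * __sync_lazy_execstate() directly; for a remote CPU the same work
     * is done in that CPU's flush-IPI handler via flush_tlb_mask(). */
    if ( cpu_isset(v->processor, v->domain->cpumask) )
        sync_lazy_execstate_cpu(v->processor);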
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/common/domain.c
--- a/xen/common/domain.c        Fri Aug 26 09:05:43 2005
+++ b/xen/common/domain.c        Fri Aug 26 09:29:54 2005
@@ -152,10 +152,7 @@
 
     /* Make sure that every vcpu is descheduled before we finalise. */
     for_each_vcpu ( d, v )
-        while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
-            cpu_relax();
-
-    sync_lazy_execstate_mask(d->cpumask);
+        vcpu_sleep_sync(v);
     BUG_ON(!cpus_empty(d->cpumask));
 
     sync_pagetable_state(d);
@@ -209,7 +206,7 @@
 
     /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
     for_each_vcpu ( d, v )
-        domain_sleep_nosync(v);
+        vcpu_sleep_nosync(v);
 }
@@ -226,7 +223,7 @@
     for_each_vcpu ( d, v )
     {
         set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
-        domain_sleep_nosync(v);
+        vcpu_sleep_nosync(v);
     }
 
     send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
@@ -275,7 +272,7 @@
 {
     BUG_ON(v == current);
     atomic_inc(&v->pausecnt);
-    domain_sleep_sync(v);
+    vcpu_sleep_sync(v);
 }
 
 void domain_pause(struct domain *d)
@@ -286,7 +283,7 @@
     {
         BUG_ON(v == current);
         atomic_inc(&v->pausecnt);
-        domain_sleep_sync(v);
+        vcpu_sleep_sync(v);
     }
 }
@@ -294,7 +291,7 @@
 {
     BUG_ON(v == current);
     if ( atomic_dec_and_test(&v->pausecnt) )
-        domain_wake(v);
+        vcpu_wake(v);
 }
 
 void domain_unpause(struct domain *d)
@@ -313,7 +310,7 @@
     {
         BUG_ON(v == current);
         if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-            domain_sleep_sync(v);
+            vcpu_sleep_sync(v);
     }
 }
@@ -324,7 +321,7 @@
     for_each_vcpu ( d, v )
     {
         if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-            domain_wake(v);
+            vcpu_wake(v);
     }
 }
@@ -413,7 +410,7 @@
 
     /* domain_unpause_by_systemcontroller */
     if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
-        domain_wake(v);
+        vcpu_wake(v);
 
     xfree(c);
     return 0;
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/common/schedule.c
--- a/xen/common/schedule.c        Fri Aug 26 09:05:43 2005
+++ b/xen/common/schedule.c        Fri Aug 26 09:29:54 2005
@@ -193,7 +193,7 @@
     TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
 }
 
-void domain_sleep_nosync(struct vcpu *v)
+void vcpu_sleep_nosync(struct vcpu *v)
 {
     unsigned long flags;
 
@@ -205,18 +205,25 @@
     TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
 }
 
-void domain_sleep_sync(struct vcpu *v)
-{
-    domain_sleep_nosync(v);
-
-    while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
+void vcpu_sleep_sync(struct vcpu *v)
+{
+    vcpu_sleep_nosync(v);
+
+    /*
+     * We can be sure that the VCPU is finally descheduled after the running
+     * flag is cleared and the scheduler lock is released.
+     */
+    while ( test_bit(_VCPUF_running, &v->vcpu_flags)
+            && !domain_runnable(v)
+            && spin_is_locked(&schedule_data[v->processor].schedule_lock) )
         cpu_relax();
 
+    /* Counteract lazy context switching. */
     if ( cpu_isset(v->processor, v->domain->cpumask) )
         sync_lazy_execstate_cpu(v->processor);
 }
 
-void domain_wake(struct vcpu *v)
+void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
 
@@ -293,7 +300,7 @@
         return -ESRCH;
     clear_bit(_VCPUF_down, &target->vcpu_flags);
     /* wake vcpu */
-    domain_wake(target);
+    vcpu_wake(target);
 
     return 0;
 }
@@ -457,10 +464,10 @@
             }
         }
     }
-    } while (!succ);
-    //spin_lock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+    } while ( !succ );
+
     SCHED_OP(adjdom, d, cmd);
-    //spin_unlock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
+
     for (cpu = 0; cpu < NR_CPUS; cpu++)
         if (__get_cpu_bit(cpu, have_lock))
             spin_unlock(&schedule_data[cpu].schedule_lock);
@@ -520,7 +527,8 @@
     perfc_incrc(sched_ctx);
 
 #if defined(WAKE_HISTO)
-    if ( !is_idle_task(next->domain) && next->wokenup ) {
+    if ( !is_idle_task(next->domain) && next->wokenup )
+    {
         ulong diff = (ulong)(now - next->wokenup);
         diff /= (ulong)MILLISECS(1);
         if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++;
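The heart of the fix is the strengthened wait loop in vcpu_sleep_sync()
above. An annotated copy follows (the code is unchanged from the hunk;
only the comments are added here for exposition). Per the patch's
rationale, the VCPU is known to be fully descheduled only once the
running flag is clear and the scheduler lock on its CPU has been
released:

    while ( test_bit(_VCPUF_running, &v->vcpu_flags)   /* still marked running */
            && !domain_runnable(v)                     /* sleep not yet aborted */
            && spin_is_locked(&schedule_data[v->processor].schedule_lock) )
        cpu_relax();                                   /* scheduler may be mid-switch */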
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h        Fri Aug 26 09:05:43 2005
+++ b/xen/include/asm-x86/mm.h        Fri Aug 26 09:29:54 2005
@@ -370,6 +370,8 @@
 
 void propagate_page_fault(unsigned long addr, u16 error_code);
 
+extern int __sync_lazy_execstate(void);
+
 /*
  * Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
  * hold a reference to the page.
diff -r 9312a3e8a6f8 -r 83c73802f02a xen/include/xen/sched.h
--- a/xen/include/xen/sched.h        Fri Aug 26 09:05:43 2005
+++ b/xen/include/xen/sched.h        Fri Aug 26 09:29:54 2005
@@ -245,18 +245,15 @@
 long sched_ctl(struct sched_ctl_cmd *);
 long sched_adjdom(struct sched_adjdom_cmd *);
 int  sched_id();
-void domain_wake(struct vcpu *d);
-void domain_sleep_nosync(struct vcpu *d);
-void domain_sleep_sync(struct vcpu *d);
-
-/*
- * Force loading of currently-executing domain state on the specified set
- * of CPUs. This is used to counteract lazy state switching where required.
+void vcpu_wake(struct vcpu *d);
+void vcpu_sleep_nosync(struct vcpu *d);
+void vcpu_sleep_sync(struct vcpu *d);
+
+/*
+ * Force loading of currently-executing domain state on the specified CPU.
+ * This is used to counteract lazy state switching where required.
  */
 extern void sync_lazy_execstate_cpu(unsigned int cpu);
-extern void sync_lazy_execstate_mask(cpumask_t mask);
-extern void sync_lazy_execstate_all(void);
-extern int __sync_lazy_execstate(void);
 
 /*
  * Called by the scheduler to switch to another VCPU. On entry, although
@@ -268,7 +265,7 @@
 * The callee must ensure that the local CPU is no longer running in @prev's
 * context, and that the context is saved to memory, before returning.
 * Alternatively, if implementing lazy context switching, it suffices to ensure
- * that invoking __sync_lazy_execstate() will switch and commit @prev's state.
+ * that invoking sync_lazy_execstate() will switch and commit @prev's state.
  */
 extern void context_switch(
     struct vcpu *prev,
@@ -287,7 +284,8 @@
 extern void continue_running(
     struct vcpu *same);
 
-int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
+/* Is CPU 'cpu' idle right now? */
+int idle_cpu(int cpu);
 
 void startup_cpu_idle_loop(void);
 
@@ -410,7 +408,7 @@
 static inline void vcpu_unblock(struct vcpu *v)
 {
     if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
-        domain_wake(v);
+        vcpu_wake(v);
 }
 
 #define IS_PRIV(_d) \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog