[Xen-changelog] Improved lazy state switching -- flush the state switch through on the first flush IPI received
ChangeSet 1.1417, 2005/04/01 10:23:54+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

    Improved lazy state switching -- flush the state switch through on the
    first flush IPI received. This avoids needing to receive any further
    ones at little cost (needed to flush TLB anyway). The main extra cost
    will be that, when switching back to a guest, we won't save the ctxt
    switch. But we may have saved an unbounded number of IPI flushes.

    Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

 arch/ia64/xenmisc.c |    1
 arch/x86/domain.c   |   73 +++++++++++++++++++++++++---------------------------
 arch/x86/mm.c       |    3 +-
 arch/x86/smp.c      |   50 ++++-------------------------------
 include/xen/sched.h |   10 ++++---
 include/xen/smp.h   |    3 --
 6 files changed, 51 insertions(+), 89 deletions(-)

diff -Nru a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
--- a/xen/arch/ia64/xenmisc.c   2005-04-01 05:05:04 -05:00
+++ b/xen/arch/ia64/xenmisc.c   2005-04-01 05:05:04 -05:00
@@ -53,7 +53,6 @@
 }
 
 /* calls in xen/common code that are unused on ia64 */
-void synchronise_execution_state(unsigned long cpu_mask) { }
 
 int grant_table_create(struct domain *d) { return 0; }
 void grant_table_destroy(struct domain *d)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-04-01 05:05:04 -05:00
+++ b/xen/arch/x86/domain.c     2005-04-01 05:05:04 -05:00
@@ -744,12 +744,21 @@
     return stu.rax;
 }
 
+#define switch_kernel_stack(_n,_c) ((void)0)
+
 #elif defined(__i386__)
 
 #define load_segments(_p, _n) ((void)0)
 #define save_segments(_p)     ((void)0)
 #define clear_segments()      ((void)0)
 
+static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
+{
+    struct tss_struct *tss = &init_tss[cpu];
+    tss->esp1 = n->arch.kernel_sp;
+    tss->ss1  = n->arch.kernel_ss;
+}
+
 #endif
 
 #define loaddebug(_ed,_reg) \
@@ -772,41 +781,35 @@
         save_segments(p);
     }
 
-    memcpy(stack_ec,
-           &n->arch.user_ctxt,
-           sizeof(*stack_ec));
-
-    /* Maybe switch the debug registers. */
-    if ( unlikely(n->arch.debugreg[7]) )
+    if ( !is_idle_task(n->domain) )
     {
-        loaddebug(&n->arch, 0);
-        loaddebug(&n->arch, 1);
-        loaddebug(&n->arch, 2);
-        loaddebug(&n->arch, 3);
-        /* no 4 and 5 */
-        loaddebug(&n->arch, 6);
-        loaddebug(&n->arch, 7);
-    }
+        memcpy(stack_ec,
+               &n->arch.user_ctxt,
+               sizeof(*stack_ec));
 
-    if ( !VMX_DOMAIN(n) )
-    {
-        SET_FAST_TRAP(&n->arch);
+        /* Maybe switch the debug registers. */
+        if ( unlikely(n->arch.debugreg[7]) )
+        {
+            loaddebug(&n->arch, 0);
+            loaddebug(&n->arch, 1);
+            loaddebug(&n->arch, 2);
+            loaddebug(&n->arch, 3);
+            /* no 4 and 5 */
+            loaddebug(&n->arch, 6);
+            loaddebug(&n->arch, 7);
+        }
 
-#ifdef __i386__
+        if ( !VMX_DOMAIN(n) )
         {
-            /* Switch the kernel ring-1 stack. */
-            struct tss_struct *tss = &init_tss[cpu];
-            tss->esp1 = n->arch.kernel_sp;
-            tss->ss1  = n->arch.kernel_ss;
+            SET_FAST_TRAP(&n->arch);
+            switch_kernel_stack(n, cpu);
         }
-#endif
     }
 
     set_bit(cpu, &n->domain->cpuset);
     write_ptbase(n);
-    clear_bit(cpu, &p->domain->cpuset);
-
     __asm__ __volatile__ ( "lgdt %0" : "=m" (*n->arch.gdt) );
 
+    clear_bit(cpu, &p->domain->cpuset);
     percpu_ctxt[cpu].curr_ed = n;
 }
@@ -820,7 +823,7 @@
 
     set_current(next);
 
-    if ( ((realprev = percpu_ctxt[smp_processor_id()]. curr_ed) == next) ||
+    if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_ed) == next) ||
          is_idle_task(next->domain) )
     {
         local_irq_enable();
@@ -851,18 +854,14 @@
         BUG();
 }
 
-static void __synchronise_lazy_execstate(void *unused)
-{
-    if ( percpu_ctxt[smp_processor_id()].curr_ed != current )
-    {
-        __context_switch();
-        load_LDT(current);
-        clear_segments();
-    }
-}
-void synchronise_lazy_execstate(unsigned long cpuset)
+int __sync_lazy_execstate(void)
 {
-    smp_subset_call_function(__synchronise_lazy_execstate, NULL, 1, cpuset);
+    if ( percpu_ctxt[smp_processor_id()].curr_ed == current )
+        return 0;
+    __context_switch();
+    load_LDT(current);
+    clear_segments();
+    return 1;
 }
 
 unsigned long __hypercall_create_continuation(
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-04-01 05:05:04 -05:00
+++ b/xen/arch/x86/mm.c 2005-04-01 05:05:04 -05:00
@@ -2827,7 +2827,8 @@
 
     if ( d != current->domain )
         domain_pause(d);
-    synchronise_lazy_execstate(~0UL);
+
+    sync_lazy_execstate_all();
 
     printk("pt base=%lx sh_info=%x\n",
            pagetable_val(d->exec_domain[0]->arch.guest_table)>>PAGE_SHIFT,
diff -Nru a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        2005-04-01 05:05:04 -05:00
+++ b/xen/arch/x86/smp.c        2005-04-01 05:05:04 -05:00
@@ -154,10 +154,13 @@
 {
     ack_APIC_irq();
     perfc_incrc(ipis);
-    if ( flush_va == FLUSHVA_ALL )
-        local_flush_tlb();
-    else
-        local_flush_tlb_one(flush_va);
+    if ( !__sync_lazy_execstate() )
+    {
+        if ( flush_va == FLUSHVA_ALL )
+            local_flush_tlb();
+        else
+            local_flush_tlb_one(flush_va);
+    }
     clear_bit(smp_processor_id(), &flush_cpumask);
 }
@@ -269,45 +272,6 @@
     wmb();
 
     send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-    while ( (wait ? data.finished : data.started) != cpuset )
-        cpu_relax();
-
-    spin_unlock(&call_lock);
-
-    return 0;
-}
-
-/* Run a function on a subset of CPUs (may include local CPU). */
-int smp_subset_call_function(
-    void (*func) (void *info), void *info, int wait, unsigned long cpuset)
-{
-    struct call_data_struct data;
-
-    ASSERT(local_irq_is_enabled());
-
-    if ( cpuset & (1UL << smp_processor_id()) )
-    {
-        local_irq_disable();
-        (*func)(info);
-        local_irq_enable();
-    }
-
-    cpuset &= ((1UL << smp_num_cpus) - 1) & ~(1UL << smp_processor_id());
-    if ( cpuset == 0 )
-        return 0;
-
-    data.func = func;
-    data.info = info;
-    data.started = data.finished = 0;
-    data.wait = wait;
-
-    spin_lock(&call_lock);
-
-    call_data = &data;
-    wmb();
-
-    send_IPI_mask(cpuset, CALL_FUNCTION_VECTOR);
 
     while ( (wait ? data.finished : data.started) != cpuset )
         cpu_relax();
diff -Nru a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   2005-04-01 05:05:03 -05:00
+++ b/xen/include/xen/sched.h   2005-04-01 05:05:04 -05:00
@@ -255,7 +255,9 @@
  * Force loading of currently-executing domain state on the specified set
  * of CPUs. This is used to counteract lazy state switching where required.
  */
-void synchronise_lazy_execstate(unsigned long cpuset);
+#define sync_lazy_execstate_cpuset(_cpuset) flush_tlb_mask(_cpuset)
+#define sync_lazy_execstate_all()           flush_tlb_all()
+extern int __sync_lazy_execstate(void);
 
 extern void context_switch(
     struct exec_domain *prev,
@@ -337,7 +339,7 @@
     ASSERT(ed != current);
     atomic_inc(&ed->pausecnt);
     domain_sleep(ed);
-    synchronise_lazy_execstate(ed->domain->cpuset & (1UL << ed->processor));
+    sync_lazy_execstate_cpuset(ed->domain->cpuset & (1UL << ed->processor));
 }
 
 static inline void domain_pause(struct domain *d)
@@ -351,7 +353,7 @@
         domain_sleep(ed);
     }
 
-    synchronise_lazy_execstate(d->cpuset);
+    sync_lazy_execstate_cpuset(d->cpuset);
 }
 
 static inline void exec_domain_unpause(struct exec_domain *ed)
@@ -386,7 +388,7 @@
         domain_sleep(ed);
     }
 
-    synchronise_lazy_execstate(d->cpuset);
+    sync_lazy_execstate_cpuset(d->cpuset);
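The control flow this patch establishes is easier to see outside the diff. Below is a minimal, self-contained C sketch of the idea: a flush IPI first tries to complete any pending lazy context switch, and only issues an explicit TLB flush if no switch was needed, because completing the switch reloads the page-table base and flushes the TLB anyway. The types, helper bodies and the two-IPI simulation in main() are invented stand-ins for illustration (the names are modelled on the patch, but this is not the Xen code above).

/*
 * Stand-alone sketch of the new flush-IPI behaviour.  Everything here is a
 * simplified stand-in; see xen/arch/x86/smp.c and domain.c in the patch for
 * the real thing.
 */
#include <stdio.h>

struct exec_domain { const char *name; };

static struct exec_domain domA = { "domA" };
static struct exec_domain idle = { "idle" };

/* What the scheduler believes is running on this CPU... */
static struct exec_domain *current_ed = &idle;
/* ...versus whose register/page-table state is still loaded (lazy). */
static struct exec_domain *loaded_ed  = &domA;

static void local_flush_tlb(void)
{
    puts("  explicit TLB flush");
}

/*
 * Flush the lazy state switch through.  Returns 1 if a real switch was
 * performed (reloading the page-table base flushes the TLB as a side
 * effect), 0 if the CPU's state was already in sync.
 */
static int sync_lazy_execstate(void)
{
    if ( loaded_ed == current_ed )
        return 0;
    printf("  completing lazy switch %s -> %s (CR3 reload flushes TLB)\n",
           loaded_ed->name, current_ed->name);
    loaded_ed = current_ed;
    return 1;
}

/* The flush-IPI handler after the patch: sync first, flush only if needed. */
static void flush_ipi_handler(void)
{
    if ( !sync_lazy_execstate() )
        local_flush_tlb();
}

int main(void)
{
    puts("first flush IPI (domA state still lazily loaded):");
    flush_ipi_handler();
    puts("second flush IPI (state already synced):");
    flush_ipi_handler();
    return 0;
}

This is the trade-off described in the changeset comment: the CPU gives up its lazily retained state on the first IPI, so switching back to that guest later costs a full context switch, but the CPU never has to service a second synchronisation IPI for the same stale state.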
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog