[Xen-changelog] Rename per-domain cpumask to more descriptive domain_dirty_cpumask.
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 82eafda1c710918f7b1b18aa658323cc75cf3967
# Parent  3eeabf448f91642f9148120564f86062b4dbb0de
Rename per-domain cpumask to more descriptive domain_dirty_cpumask.

Add a new per-vcpu dirty cpumask (vcpu_dirty_cpumask), useful for
state flushing and selective tlb flushing.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
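The selective-flush point is easiest to see in vcpumask_to_pcpumask(): the
physical-CPU flush target is now built by OR-ing the per-VCPU dirty masks
rather than by reading each VCPU's current processor. Below is a minimal,
compilable user-space sketch of that idea; cpumask_t, the bit operations and
the domain layout are simplified stand-ins for the real Xen types, not the
hypervisor code itself.

    #include <stdio.h>

    #define MAX_VIRT_CPUS 32

    /* Stand-in for Xen's cpumask_t: one bit per physical CPU. */
    typedef unsigned long cpumask_t;

    struct vcpu {
        /* CPUs still holding this VCPU's state (new in this changeset). */
        cpumask_t vcpu_dirty_cpumask;
    };

    struct domain {
        struct vcpu *vcpu[MAX_VIRT_CPUS];
    };

    /*
     * Sketch of the patched vcpumask_to_pcpumask(): translate a guest's
     * virtual-CPU bitmap into the set of physical CPUs that must be
     * flushed.  OR-ing the per-VCPU dirty masks (cpus_or() in Xen) covers
     * every CPU that may cache stale state, which is why the callers'
     * old "cpus_and(pmask, pmask, d->cpumask)" narrowing can be dropped.
     */
    static cpumask_t vcpumask_to_pcpumask(struct domain *d,
                                          unsigned long vmask)
    {
        cpumask_t pmask = 0;        /* CPU_MASK_NONE */
        unsigned int vcpu_id;

        for ( vcpu_id = 0; vcpu_id < MAX_VIRT_CPUS; vcpu_id++ )
        {
            if ( (vmask & (1UL << vcpu_id)) && (d->vcpu[vcpu_id] != NULL) )
                pmask |= d->vcpu[vcpu_id]->vcpu_dirty_cpumask;
        }

        return pmask;
    }

    int main(void)
    {
        struct vcpu v0 = { .vcpu_dirty_cpumask = 0x3 }; /* dirty on CPUs 0,1 */
        struct vcpu v1 = { .vcpu_dirty_cpumask = 0x4 }; /* dirty on CPU 2   */
        struct domain d = { .vcpu = { &v0, &v1 } };

        /* Guest asks to flush VCPUs 0 and 1 -> physical CPUs 0, 1 and 2. */
        printf("flush mask: %#lx\n", vcpumask_to_pcpumask(&d, 0x3));
        return 0;
    }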
diff -r 3eeabf448f91 -r 82eafda1c710 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Jan  6 15:47:25 2006
+++ b/xen/arch/x86/domain.c     Fri Jan  6 16:45:31 2006
@@ -94,7 +94,8 @@
     ASSERT(is_idle_domain(v->domain));
     percpu_ctxt[smp_processor_id()].curr_vcpu = v;
-    cpu_set(smp_processor_id(), v->domain->cpumask);
+    cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
+    cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
     v->arch.schedule_tail = continue_idle_domain;

     reset_stack_and_jump(idle_loop);
@@ -724,7 +725,8 @@
     }

     if ( p->domain != n->domain )
-        cpu_set(cpu, n->domain->cpumask);
+        cpu_set(cpu, n->domain->domain_dirty_cpumask);
+    cpu_set(cpu, n->vcpu_dirty_cpumask);

     write_ptbase(n);
@@ -737,7 +739,8 @@
     }

     if ( p->domain != n->domain )
-        cpu_clear(cpu, p->domain->cpumask);
+        cpu_clear(cpu, p->domain->domain_dirty_cpumask);
+    cpu_clear(cpu, n->vcpu_dirty_cpumask);

     percpu_ctxt[cpu].curr_vcpu = n;
 }
@@ -812,20 +815,11 @@

 void sync_vcpu_execstate(struct vcpu *v)
 {
-    unsigned int cpu = v->processor;
-
-    if ( !cpu_isset(cpu, v->domain->cpumask) )
-        return;
-
-    if ( cpu == smp_processor_id() )
-    {
+    if ( cpu_isset(smp_processor_id(), v->vcpu_dirty_cpumask) )
         (void)__sync_lazy_execstate();
-    }
-    else
-    {
-        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
-        flush_tlb_mask(cpumask_of_cpu(cpu));
-    }
+
+    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+    flush_tlb_mask(v->vcpu_dirty_cpumask);
 }

 unsigned long __hypercall_create_continuation(
@@ -951,7 +945,7 @@
     struct vcpu *v;
     unsigned long pfn;

-    BUG_ON(!cpus_empty(d->cpumask));
+    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

     ptwr_destroy(d);
diff -r 3eeabf448f91 -r 82eafda1c710 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Jan  6 15:47:25 2006
+++ b/xen/arch/x86/mm.c Fri Jan  6 16:45:31 2006
@@ -1457,7 +1457,8 @@
              * was GDT/LDT) but those circumstances should be
              * very rare.
              */
-            cpumask_t mask = page_get_owner(page)->cpumask;
+            cpumask_t mask =
+                page_get_owner(page)->domain_dirty_cpumask;
             tlbflush_filter(mask, page->tlbflush_timestamp);

             if ( unlikely(!cpus_empty(mask)) )
@@ -1619,7 +1620,7 @@
         if ( shadow_mode_enabled(d) )
             shadow_sync_all(d);
         if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
-            flush_tlb_mask(d->cpumask);
+            flush_tlb_mask(d->domain_dirty_cpumask);
         else
             local_flush_tlb();
     }
@@ -1691,7 +1692,7 @@
     struct domain *d, unsigned long vmask)
 {
     unsigned int vcpu_id;
-    cpumask_t    pmask;
+    cpumask_t    pmask = CPU_MASK_NONE;
     struct vcpu *v;

     while ( vmask != 0 )
@@ -1700,7 +1701,7 @@
         vmask &= ~(1UL << vcpu_id);
         if ( (vcpu_id < MAX_VIRT_CPUS) &&
              ((v = d->vcpu[vcpu_id]) != NULL) )
-            cpu_set(v->processor, pmask);
+            cpus_or(pmask, pmask, v->vcpu_dirty_cpumask);
     }

     return pmask;
@@ -1869,7 +1870,6 @@
             break;
         }
         pmask = vcpumask_to_pcpumask(d, vmask);
-        cpus_and(pmask, pmask, d->cpumask);
         if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
             flush_tlb_mask(pmask);
         else
@@ -1878,11 +1878,11 @@
     }

     case MMUEXT_TLB_FLUSH_ALL:
-        flush_tlb_mask(d->cpumask);
+        flush_tlb_mask(d->domain_dirty_cpumask);
         break;

     case MMUEXT_INVLPG_ALL:
-        flush_tlb_one_mask(d->cpumask, op.arg1.linear_addr);
+        flush_tlb_one_mask(d->domain_dirty_cpumask, op.arg1.linear_addr);
         break;

     case MMUEXT_FLUSH_CACHE:
@@ -2548,13 +2548,12 @@
         local_flush_tlb();
         break;
     case UVMF_ALL:
-        flush_tlb_mask(d->cpumask);
+        flush_tlb_mask(d->domain_dirty_cpumask);
         break;
     default:
         if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
             rc = -EFAULT;
         pmask = vcpumask_to_pcpumask(d, vmask);
-        cpus_and(pmask, pmask, d->cpumask);
         flush_tlb_mask(pmask);
         break;
     }
@@ -2569,13 +2568,12 @@
         local_flush_tlb_one(va);
         break;
     case UVMF_ALL:
-        flush_tlb_one_mask(d->cpumask, va);
+        flush_tlb_one_mask(d->domain_dirty_cpumask, va);
         break;
     default:
         if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
             rc = -EFAULT;
         pmask = vcpumask_to_pcpumask(d, vmask);
-        cpus_and(pmask, pmask, d->cpumask);
         flush_tlb_one_mask(pmask, va);
         break;
     }
@@ -3018,7 +3016,7 @@

     /* Ensure that there are no stale writable mappings in any TLB. */
     /* NB. INVLPG is a serialising instruction: flushes pending updates. */
-    flush_tlb_one_mask(d->cpumask, l1va);
+    flush_tlb_one_mask(d->domain_dirty_cpumask, l1va);
     PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
                 PTWR_PRINT_WHICH, ptep, pte.l1);

@@ -3342,7 +3340,7 @@
     if ( which == PTWR_PT_ACTIVE )
     {
         l2e_remove_flags(*pl2e, _PAGE_PRESENT);
-        flush_tlb_mask(d->cpumask);
+        flush_tlb_mask(d->domain_dirty_cpumask);
     }

     /* Temporarily map the L1 page, and make a copy of it. */
diff -r 3eeabf448f91 -r 82eafda1c710 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Fri Jan  6 15:47:25 2006
+++ b/xen/arch/x86/shadow.c     Fri Jan  6 16:45:31 2006
@@ -1800,7 +1800,7 @@
     }

     /* Other VCPUs mustn't use the revoked writable mappings. */
-    other_vcpus_mask = d->cpumask;
+    other_vcpus_mask = d->domain_dirty_cpumask;
     cpu_clear(smp_processor_id(), other_vcpus_mask);
     flush_tlb_mask(other_vcpus_mask);
diff -r 3eeabf448f91 -r 82eafda1c710 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Fri Jan  6 15:47:25 2006
+++ b/xen/arch/x86/shadow32.c   Fri Jan  6 16:45:31 2006
@@ -2586,7 +2586,7 @@
     }

     /* Other VCPUs mustn't use the revoked writable mappings. */
-    other_vcpus_mask = d->cpumask;
+    other_vcpus_mask = d->domain_dirty_cpumask;
     cpu_clear(smp_processor_id(), other_vcpus_mask);
     flush_tlb_mask(other_vcpus_mask);
diff -r 3eeabf448f91 -r 82eafda1c710 xen/common/domain.c
--- a/xen/common/domain.c       Fri Jan  6 15:47:25 2006
+++ b/xen/common/domain.c       Fri Jan  6 16:45:31 2006
@@ -179,7 +179,7 @@
     /* Make sure that every vcpu is descheduled before we finalise. */
     for_each_vcpu ( d, v )
         vcpu_sleep_sync(v);
-    BUG_ON(!cpus_empty(d->cpumask));
+    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

     sync_pagetable_state(d);
diff -r 3eeabf448f91 -r 82eafda1c710 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Fri Jan  6 15:47:25 2006
+++ b/xen/common/grant_table.c  Fri Jan  6 16:45:31 2006
@@ -471,7 +471,7 @@
     for ( i = 0; i < count; i++ )
         (void)__gnttab_unmap_grant_ref(&uop[i]);

-    flush_tlb_mask(current->domain->cpumask);
+    flush_tlb_mask(current->domain->domain_dirty_cpumask);

     return 0;
 }
diff -r 3eeabf448f91 -r 82eafda1c710 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Jan  6 15:47:25 2006
+++ b/xen/common/page_alloc.c   Fri Jan  6 16:45:31 2006
@@ -615,7 +615,7 @@
             shadow_drop_references(d, &pg[i]);
             ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
             pg[i].tlbflush_timestamp = tlbflush_current_time();
-            pg[i].u.free.cpumask     = d->cpumask;
+            pg[i].u.free.cpumask     = d->domain_dirty_cpumask;
             list_del(&pg[i].list);
         }
diff -r 3eeabf448f91 -r 82eafda1c710 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Jan  6 15:47:25 2006
+++ b/xen/common/schedule.c     Fri Jan  6 16:45:31 2006
@@ -339,18 +339,23 @@
     do {
         succ = 0;
         __clear_cpu_bits(have_lock);
-        for_each_vcpu(d, v) {
+        for_each_vcpu ( d, v )
+        {
             cpu = v->processor;
-            if (!__get_cpu_bit(cpu, have_lock)) {
+            if ( !__get_cpu_bit(cpu, have_lock) )
+            {
                 /* if we don't have a lock on this CPU: acquire it*/
-                if (spin_trylock(&schedule_data[cpu].schedule_lock)) {
+                if ( spin_trylock(&schedule_data[cpu].schedule_lock) )
+                {
                     /*we have this lock!*/
                     __set_cpu_bit(cpu, have_lock);
                     succ = 1;
-                } else {
+                }
+                else
+                {
                     /*we didn,t get this lock -> free all other locks too!*/
-                    for (cpu = 0; cpu < NR_CPUS; cpu++)
-                        if (__get_cpu_bit(cpu, have_lock))
+                    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+                        if ( __get_cpu_bit(cpu, have_lock) )
                             spin_unlock(&schedule_data[cpu].schedule_lock);
                     /* and start from the beginning! */
                     succ = 0;
@@ -363,8 +368,8 @@

     SCHED_OP(adjdom, d, cmd);

-    for (cpu = 0; cpu < NR_CPUS; cpu++)
-        if (__get_cpu_bit(cpu, have_lock))
+    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
+        if ( __get_cpu_bit(cpu, have_lock) )
             spin_unlock(&schedule_data[cpu].schedule_lock);
     __clear_cpu_bits(have_lock);

@@ -380,8 +385,8 @@
  */
 static void __enter_scheduler(void)
 {
-    struct vcpu       *prev = current, *next = NULL;
-    int                cpu = prev->processor;
+    struct vcpu          *prev = current, *next = NULL;
+    int                   cpu = smp_processor_id();
     s_time_t              now;
     struct task_slice     next_slice;
     s32                   r_time;     /* time for new dom to run */
@@ -502,7 +507,7 @@
 static void t_timer_fn(void *unused)
 {
     struct vcpu  *v   = current;
-    unsigned int  cpu = v->processor;
+    unsigned int  cpu = smp_processor_id();

     schedule_data[cpu].tick++;
diff -r 3eeabf448f91 -r 82eafda1c710 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Fri Jan  6 15:47:25 2006
+++ b/xen/include/asm-x86/processor.h   Fri Jan  6 16:45:31 2006
@@ -190,7 +190,7 @@
 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
 #else
-static inline void detect_ht(struct cpuinfo_x86 *c) {}
+static always_inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif

 /*
@@ -209,7 +209,7 @@
 /*
  * CPUID functions returning a single datum
  */
-static inline unsigned int cpuid_eax(unsigned int op)
+static always_inline unsigned int cpuid_eax(unsigned int op)
 {
     unsigned int eax;

@@ -219,7 +219,7 @@
         : "bx", "cx", "dx");
     return eax;
 }
-static inline unsigned int cpuid_ebx(unsigned int op)
+static always_inline unsigned int cpuid_ebx(unsigned int op)
 {
     unsigned int eax, ebx;

@@ -229,7 +229,7 @@
         : "cx", "dx" );
     return ebx;
 }
-static inline unsigned int cpuid_ecx(unsigned int op)
+static always_inline unsigned int cpuid_ecx(unsigned int op)
 {
     unsigned int eax, ecx;

@@ -239,7 +239,7 @@
         : "bx", "dx" );
     return ecx;
 }
-static inline unsigned int cpuid_edx(unsigned int op)
+static always_inline unsigned int cpuid_edx(unsigned int op)
 {
     unsigned int eax, edx;

@@ -281,7 +281,7 @@
  */
 extern unsigned long mmu_cr4_features;

-static inline void set_in_cr4 (unsigned long mask)
+static always_inline void set_in_cr4 (unsigned long mask)
 {
     unsigned long dummy;
     mmu_cr4_features |= mask;
@@ -292,7 +292,7 @@
         : "=&r" (dummy) : "irg" (mask) );
 }

-static inline void clear_in_cr4 (unsigned long mask)
+static always_inline void clear_in_cr4 (unsigned long mask)
 {
     unsigned long dummy;
     mmu_cr4_features &= ~mask;
@@ -334,7 +334,7 @@
         outb((data), 0x23); \
 } while (0)

-static inline void __monitor(const void *eax, unsigned long ecx,
+static always_inline void __monitor(const void *eax, unsigned long ecx,
                unsigned long edx)
 {
     /* "monitor %eax,%ecx,%edx;" */
@@ -343,7 +343,7 @@
         : :"a" (eax), "c" (ecx), "d"(edx));
 }

-static inline void __mwait(unsigned long eax, unsigned long ecx)
+static always_inline void __mwait(unsigned long eax, unsigned long ecx)
 {
     /* "mwait %eax,%ecx;" */
     asm volatile(
@@ -460,7 +460,7 @@
 };

 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
+static always_inline void rep_nop(void)
 {
     __asm__ __volatile__ ( "rep;nop" : : : "memory" );
 }
@@ -471,7 +471,7 @@

 #ifdef CONFIG_MPENTIUMIII

 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+extern always_inline void prefetch(const void *x)
 {
     __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
 }
@@ -482,12 +482,12 @@
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH

-extern inline void prefetch(const void *x)
+extern always_inline void prefetch(const void *x)
 {
     __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
 }

-extern inline void prefetchw(const void *x)
+extern always_inline void prefetchw(const void *x)
 {
     __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
 }
diff -r 3eeabf448f91 -r 82eafda1c710 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Fri Jan  6 15:47:25 2006
+++ b/xen/include/asm-x86/shadow.h      Fri Jan  6 16:45:31 2006
@@ -591,7 +591,7 @@
         if ( need_flush )
         {
             perfc_incrc(update_hl2e_invlpg);
-            flush_tlb_one_mask(v->domain->cpumask,
+            flush_tlb_one_mask(v->domain->domain_dirty_cpumask,
                                &linear_pg_table[l1_linear_offset(va)]);
         }
     }
diff -r 3eeabf448f91 -r 82eafda1c710 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri Jan  6 15:47:25 2006
+++ b/xen/include/xen/sched.h   Fri Jan  6 16:45:31 2006
@@ -78,7 +78,11 @@

     atomic_t         pausecnt;

+    /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_t        cpu_affinity;
+
+    /* Bitmask of CPUs which are holding onto this VCPU's state. */
+    cpumask_t        vcpu_dirty_cpumask;

     struct arch_vcpu arch;
 };
@@ -139,7 +143,7 @@
     struct vcpu *vcpu[MAX_VIRT_CPUS];

     /* Bitmask of CPUs which are holding onto this domain's state. */
-    cpumask_t        cpumask;
+    cpumask_t        domain_dirty_cpumask;

     struct arch_domain arch;
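For completeness, a rough user-space model of how the two dirty masks are
maintained at context switch and consumed by sync_vcpu_execstate(). All names
here are simplified stand-ins for the Xen primitives: flush_tlb_mask() is
stubbed to show only the target set, whereas the real one sends IPIs whose
handler performs the lazy state sync.

    #include <stdio.h>

    typedef unsigned long cpumask_t;    /* stand-in: one bit per physical CPU */

    struct domain {
        cpumask_t domain_dirty_cpumask; /* CPUs holding any domain state */
    };

    struct vcpu {
        struct domain *domain;
        cpumask_t vcpu_dirty_cpumask;   /* CPUs holding this VCPU's state */
    };

    static unsigned int current_cpu;    /* stand-in for smp_processor_id() */

    /* Stub: the real flush_tlb_mask() IPIs every CPU in the mask, and the
     * IPI handler calls __sync_lazy_execstate() as a side effect. */
    static void flush_tlb_mask(cpumask_t mask)
    {
        printf("flush IPI -> CPU mask %#lx\n", mask);
    }

    /* Context switch-in: the running CPU now caches state for both the
     * VCPU and its domain (cpu_set() in the real code). */
    static void mark_dirty(struct vcpu *v)
    {
        v->vcpu_dirty_cpumask           |= 1UL << current_cpu;
        v->domain->domain_dirty_cpumask |= 1UL << current_cpu;
    }

    /* Shape of the reworked sync_vcpu_execstate(): sync locally if this
     * CPU holds the VCPU's state, then let the flush IPI sync the rest. */
    static void sync_vcpu_execstate(struct vcpu *v)
    {
        if ( v->vcpu_dirty_cpumask & (1UL << current_cpu) )
        {
            /* the real code calls (void)__sync_lazy_execstate() here */
        }

        flush_tlb_mask(v->vcpu_dirty_cpumask);
    }

    int main(void)
    {
        struct domain d = { 0 };
        struct vcpu v = { .domain = &d };

        current_cpu = 1;
        mark_dirty(&v);             /* VCPU ran on CPU 1 */

        current_cpu = 0;            /* later, CPU 0 wants coherent state */
        sync_vcpu_execstate(&v);    /* flushes only CPU 1, not the world */
        return 0;
    }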
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog