[Xen-changelog] Merged.
# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID ca2e91ab431167192e03ccd7248af40bc4d1a45b
# Parent fe487b19c379a6f927fea4c01faa6e039594864e
# Parent 76fbcb25d174095d211712e52b43bbe9fff35bdc
Merged.

diff -r fe487b19c379 -r ca2e91ab4311 linux-2.6-xen-sparse/include/asm-xen/asm-i386/spinlock.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/spinlock.h Thu Nov 3 01:44:41 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/spinlock.h Thu Nov 3 01:45:07 2005
@@ -6,6 +6,7 @@
 #include <asm/page.h>
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/smp_alt.h>
 
 asmlinkage int printk(const char * fmt, ...)
         __attribute__ ((format (printf, 1, 2)));
@@ -47,8 +48,9 @@
 #define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
 
 #define spin_lock_string \
-        "\n1:\t" \
-        "lock ; decb %0\n\t" \
+        "1:\n" \
+        LOCK \
+        "decb %0\n\t" \
         "jns 3f\n" \
         "2:\t" \
         "rep;nop\n\t" \
@@ -58,8 +60,9 @@
         "3:\n\t"
 
 #define spin_lock_string_flags \
-        "\n1:\t" \
-        "lock ; decb %0\n\t" \
+        "1:\n" \
+        LOCK \
+        "decb %0\n\t" \
         "jns 4f\n\t" \
         "2:\t" \
         "testl $0x200, %1\n\t" \
@@ -121,10 +124,34 @@
 static inline int _raw_spin_trylock(spinlock_t *lock)
 {
         char oldval;
-        __asm__ __volatile__(
-                "xchgb %b0,%1"
+#ifdef CONFIG_SMP_ALTERNATIVES
+        __asm__ __volatile__(
+                "1:movb %1,%b0\n"
+                "movb $0,%1\n"
+                "2:"
+                ".section __smp_alternatives,\"a\"\n"
+                ".long 1b\n"
+                ".long 3f\n"
+                ".previous\n"
+                ".section __smp_replacements,\"a\"\n"
+                "3: .byte 2b - 1b\n"
+                ".byte 5f-4f\n"
+                ".byte 0\n"
+                ".byte 6f-5f\n"
+                ".byte -1\n"
+                "4: xchgb %b0,%1\n"
+                "5: movb %1,%b0\n"
+                "movb $0,%1\n"
+                "6:\n"
+                ".previous\n"
                 :"=q" (oldval), "=m" (lock->slock)
                 :"0" (0) : "memory");
+#else
+        __asm__ __volatile__(
+                "xchgb %b0,%1\n"
+                :"=q" (oldval), "=m" (lock->slock)
+                :"0" (0) : "memory");
+#endif
         return oldval > 0;
 }
 
@@ -225,8 +252,8 @@
         __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)    asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)   asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+#define _raw_read_unlock(rw)    asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw)   asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
 
 static inline int _raw_read_trylock(rwlock_t *lock)
 {
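A note on the _raw_spin_trylock hunk above: the two inline-asm variants differ only in whether the read-and-clear of the lock byte is atomic. The plain-C sketch below shows what each variant computes; the function names and the use of GCC's __sync_lock_test_and_set builtin as a stand-in for xchgb are illustrative assumptions, not code from this changeset.

    #include <stdbool.h>

    /* Sketch only: slock is 1 when the lock is free, 0 or negative when held. */

    /* Uniprocessor form (the movb pair emitted inline at labels 1:/2:, and
     * again as the 5:/6: replacement): no other CPU can touch the byte
     * between the load and the store, so no locked bus cycle is needed. */
    static bool trylock_up_sketch(volatile signed char *slock)
    {
        signed char oldval = *slock;   /* movb %1,%b0 */
        *slock = 0;                    /* movb $0,%1  */
        return oldval > 0;
    }

    /* SMP form (the 4: replacement): another CPU may race between the load
     * and the store, so the read-and-clear must be one atomic exchange. */
    static bool trylock_smp_sketch(volatile signed char *slock)
    {
        signed char oldval = __sync_lock_test_and_set(slock, 0);  /* xchgb */
        return oldval > 0;
    }

The LOCK macro substituted for the hard-coded "lock ; " prefixes elsewhere in the file presumably comes from the newly included <asm/smp_alt.h>, which is not part of this diff; the point of the rewrite is that the choice between the two forms can be made (or patched) per build or at boot rather than always paying for the locked cycle.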
diff -r fe487b19c379 -r ca2e91ab4311 xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Thu Nov 3 01:44:41 2005
+++ b/xen/arch/x86/x86_32/domain_page.c Thu Nov 3 01:45:07 2005
@@ -27,6 +27,19 @@
 static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
 static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
 
+/* Use a spare PTE bit to mark entries ready for recycling. */
+#define READY_FOR_TLB_FLUSH (1<<10)
+
+static void flush_all_ready_maps(void)
+{
+    l1_pgentry_t *cache = mapcache;
+    unsigned int i;
+
+    for ( i = 0; i < MAPCACHE_ENTRIES; i++ )
+        if ( (l1e_get_flags(cache[i]) & READY_FOR_TLB_FLUSH) )
+            cache[i] = l1e_empty();
+}
+
 void *map_domain_page(unsigned long pfn)
 {
     unsigned long va;
@@ -54,6 +67,7 @@
     if ( unlikely(idx == 0) )
     {
         ASSERT(flush_count++ == 0);
+        flush_all_ready_maps();
         perfc_incrc(domain_page_tlb_flush);
         local_flush_tlb();
         shadow_epoch[cpu] = ++epoch;
@@ -75,5 +89,5 @@
     ASSERT((void *)MAPCACHE_VIRT_START <= va);
     ASSERT(va < (void *)MAPCACHE_VIRT_END);
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    mapcache[idx] = l1e_empty();
+    l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
 }
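On the domain_page.c side, unmap_domain_page() no longer clears a mapcache PTE immediately; it only tags the entry with the spare READY_FOR_TLB_FLUSH bit, and flush_all_ready_maps() recycles every tagged entry in one batch under the single local TLB flush that already occurs when the allocation index wraps. A condensed sketch of that protocol follows, assuming a simplified entry type, a placeholder MAPCACHE_ENTRIES value and hypothetical helper names; the per-CPU epoch bookkeeping of the real code is omitted.

    #define MAPCACHE_ENTRIES     1024          /* placeholder size */
    #define READY_FOR_TLB_FLUSH  (1 << 10)     /* spare PTE bit, as in the patch */

    static unsigned long mapcache[MAPCACHE_ENTRIES];  /* stand-in for the l1e array */
    static unsigned int map_idx;

    /* Unmap: defer the teardown -- only mark the slot.  The stale translation
     * may linger in the TLB until the next batch flush. */
    static void unmap_entry_sketch(unsigned int idx)
    {
        mapcache[idx] |= READY_FOR_TLB_FLUSH;
    }

    /* Map: take the next slot; when the index wraps, empty every marked slot
     * and issue the one TLB flush that covers all of them. */
    static unsigned int alloc_entry_sketch(void)
    {
        unsigned int i;

        map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
        if (map_idx == 0) {
            for (i = 0; i < MAPCACHE_ENTRIES; i++)
                if (mapcache[i] & READY_FOR_TLB_FLUSH)
                    mapcache[i] = 0;           /* l1e_empty() in the patch */
            /* local_flush_tlb() happens at this point in the real code. */
        }
        return map_idx;
    }

The payoff is that unmapping becomes a single flag update, and the cost of invalidating stale entries is amortised over a whole pass through the cache.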
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog