
[Xen-changelog] Based on Keir's suggestion yesterday, I fixed the x86_64 segment-switching bug in xenlinux.



ChangeSet 1.1768, 2005/06/28 18:47:14+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Based on Keir's suggestion yesterday, I fixed the x86_64
        segment-switching bug in xenlinux. The LTP test cases now pass in
        domU; as a sanity test, I ran LTP in domU alongside an infinite
        loop of "make clean; make -j4" in dom0.
        
        Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
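

For readers skimming the diff below: the fix hoists the saving and
clearing of the user segment registers out of __switch_to() and into a
new __prepare_arch_switch() hook that runs before Xen reloads cr3 and
the LDT. __switch_to() can then assume every selector is already zero
and only reload the incoming task's non-zero state. The user-space C
sketch below models that ordering only; struct thread_stub,
load_selector_stub() and set_base_stub() are hypothetical stand-ins for
the kernel's thread_struct, loadsegment()/load_gs_index() and
HYPERVISOR_set_segment_base(), not the real interfaces.

/* Hypothetical user-space model of the reworked switch path. */
#include <stdio.h>

struct thread_stub {
        unsigned es, ds, fsindex, gsindex;   /* segment selectors */
        unsigned long fs, gs;                /* 64-bit segment bases */
};

static void load_selector_stub(const char *reg, unsigned sel)
{
        printf("reload %%%s = %#x\n", reg, sel);
}

static void set_base_stub(const char *seg, unsigned long base)
{
        printf("hypercall: set %s base = %#lx\n", seg, base);
}

/* Models __prepare_arch_switch(): clear prev's selectors (the real
 * code first snapshots them into thread_struct with inline asm)
 * *before* the hypervisor switches cr3/LDT, so nothing still
 * references the outgoing task's LDT afterwards. */
static void prepare_arch_switch_stub(struct thread_stub *prev)
{
        if (prev->ds)      load_selector_stub("ds", 0);
        if (prev->es)      load_selector_stub("es", 0);
        if (prev->fsindex) { load_selector_stub("fs", 0); prev->fs = 0; }
        if (prev->gsindex) { load_selector_stub("gs", 0); prev->gs = 0; }
}

/* Models __switch_to(): every selector is known to be zero on entry,
 * so only next's non-zero state has to be loaded. */
static void switch_to_stub(const struct thread_stub *next)
{
        if (next->es)      load_selector_stub("es", next->es);
        if (next->ds)      load_selector_stub("ds", next->ds);
        if (next->fsindex) load_selector_stub("fs", next->fsindex);
        if (next->fs)      set_base_stub("FS", next->fs);
        if (next->gsindex) load_selector_stub("gs", next->gsindex);
        if (next->gs)      set_base_stub("GS_USER", next->gs);
}

int main(void)
{
        struct thread_stub prev = { .ds = 0x2b, .fsindex = 0x33 };
        struct thread_stub next = { .gs = 0xbeef000 };

        prepare_arch_switch_stub(&prev);  /* runs before cr3/LDT reload */
        switch_to_stub(&next);            /* runs after it */
        return 0;
}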



 arch/xen/x86_64/kernel/process.c         |   54 +++++++------------------------
 include/asm-xen/asm-x86_64/mmu_context.h |   40 ++++++++++++++++++++--
 2 files changed, 48 insertions(+), 46 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c  2005-06-28 14:02:30 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c  2005-06-28 14:02:30 -04:00
@@ -474,51 +474,26 @@
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
-       asm volatile("movl %%es,%0" : "=m" (prev->es)); 
-       if (unlikely(next->es | prev->es))
+       if (unlikely(next->es))
                loadsegment(es, next->es); 
        
-       asm volatile ("movl %%ds,%0" : "=m" (prev->ds)); 
-       if (unlikely(next->ds | prev->ds))
+       if (unlikely(next->ds))
                loadsegment(ds, next->ds);
 
        /* 
         * Switch FS and GS.
         */
-       { 
-               unsigned fsindex;
-               asm volatile("movl %%fs,%0" : "=g" (fsindex)); 
-               /* segment register != 0 always requires a reload. 
-                  also reload when it has changed. 
-                  when prev process used 64bit base always reload
-                  to avoid an information leak. */
-               if (unlikely(fsindex | next->fsindex | prev->fs)) {
-                       loadsegment(fs, next->fsindex);
-                       /* check if the user used a selector != 0
-                        * if yes clear 64bit base, since overloaded base
-                         * is always mapped to the Null selector
-                         */
-                       if (fsindex)
-                       prev->fs = 0;                           
-               }
-               /* when next process has a 64bit base use it */
-               if (next->fs) {
-                        HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
-                }
-               prev->fsindex = fsindex;
-       }
-       { 
-               unsigned gsindex;
-               asm volatile("movl %%gs,%0" : "=g" (gsindex)); 
-               if (unlikely(gsindex | next->gsindex | prev->gs)) {
-                       load_gs_index(next->gsindex);
-                       if (gsindex)
-                       prev->gs = 0;                           
-               }
-               if (next->gs)
-                    HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
-               prev->gsindex = gsindex;
-       }
+       if (unlikely(next->fsindex))
+               loadsegment(fs, next->fsindex);
+
+       if (next->fs)
+               HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
+       
+       if (unlikely(next->gsindex))
+               load_gs_index(next->gsindex);
+
+       if (next->gs)
+               HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
 
        /* 
         * Switch the PDA context.
@@ -660,7 +635,6 @@
                        if (doit) {
                load_gs_index(0);
                 ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
-                printk("do_arch_prctl: SET_SET: addr = %lx\n", addr);
                        } 
                }
                put_cpu();
@@ -700,7 +674,6 @@
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit) {
                        rdmsrl(MSR_FS_BASE, base);
-                        printk("do_arch_prctl: GET_FS: addr = %lx\n", addr);
                } else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr); 
@@ -712,7 +685,6 @@
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
-                        printk("do_arch_prctl: GET_GS: addr = %lx\n", addr);
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr); 
diff -Nru a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h  2005-06-28 14:02:30 -04:00
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h  2005-06-28 14:02:30 -04:00
@@ -16,18 +16,48 @@
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_SMP
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+#if 0 /*  XEN: no lazy tlb */
        if (read_pda(mmu_state) == TLBSTATE_OK) 
                write_pda(mmu_state, TLBSTATE_LAZY);
+#endif
 }
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+
+#define prepare_arch_switch(rq,next)   __prepare_arch_switch()
+#define finish_arch_switch(rq, next)   spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p)            ((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
 {
+       /*
+        * Save away %es, %ds, %fs and %gs. Must happen before reload
+        * of cr3/ldt (i.e., not in __switch_to).
+        */
+       __asm__ __volatile__ (
+               "movl %%es,%0 ; movl %%ds,%1 ; movl %%fs,%2 ; movl %%gs,%3"
+               : "=m" (current->thread.es),
+                 "=m" (current->thread.ds),
+                 "=m" (current->thread.fsindex),
+                 "=m" (current->thread.gsindex) );
+
+       if (current->thread.ds)
+               __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
+
+       if (current->thread.es)
+               __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
+
+       if (current->thread.fsindex) {
+               __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
+               current->thread.fs = 0;
+       }
+
+       if (current->thread.gsindex) {
+               load_gs_index(0);
+               current->thread.gs = 0;
+       }
 }
-#endif
+
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
                             struct task_struct *tsk)

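
For completeness: the do_arch_prctl() branches that lose their debug
printk()s above are reachable from user space through the arch_prctl()
system call. The snippet below is a minimal sketch of that (ordinary
Linux/x86_64 usage, nothing Xen-specific); on 2005-era glibc there is
no wrapper, so it goes through syscall().

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>   /* SYS_arch_prctl */
#include <asm/prctl.h>     /* ARCH_GET_FS, ARCH_GET_GS */

int main(void)
{
        unsigned long fs_base = 0, gs_base = 0;

        /* these hit the ARCH_GET_FS / ARCH_GET_GS cases in do_arch_prctl() */
        if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) != 0)
                perror("arch_prctl(ARCH_GET_FS)");
        if (syscall(SYS_arch_prctl, ARCH_GET_GS, &gs_base) != 0)
                perror("arch_prctl(ARCH_GET_GS)");

        printf("FS base = %#lx, GS base = %#lx\n", fs_base, gs_base);
        return 0;
}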