[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] More code cleanups, mainly to do_iret() implementations.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b7e88c83b2a0991590e5d46531d487a3356cd4fb
# Parent  c96ea9ebcd298c30bc251cce14b78ca8cb3dd69e
More code cleanups, mainly to do_iret() implementations.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Jan 11 18:26:03 2006
+++ b/xen/arch/x86/domain.c     Wed Jan 11 18:44:54 2006
@@ -480,14 +480,6 @@
 
 #ifdef __x86_64__
 
-void toggle_guest_mode(struct vcpu *v)
-{
-    v->arch.flags ^= TF_kernel_mode;
-    __asm__ __volatile__ ( "swapgs" );
-    update_pagetables(v);
-    write_ptbase(v);
-}
-
 #define loadsegment(seg,value) ({               \
     int __r = 1;                                \
     __asm__ __volatile__ (                      \
diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jan 11 18:26:03 2006
+++ b/xen/arch/x86/mm.c Wed Jan 11 18:44:54 2006
@@ -297,7 +297,6 @@
 
 #if defined(__x86_64__)
     /* If in user mode, switch to kernel mode just to read LDT mapping. */
-    extern void toggle_guest_mode(struct vcpu *);
     int user_mode = !(v->arch.flags & TF_kernel_mode);
 #define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
 #elif defined(__i386__)
@@ -2971,7 +2970,6 @@
 
 #ifdef CONFIG_X86_64
     struct vcpu *v = current;
-    extern void toggle_guest_mode(struct vcpu *);
     int user_mode = !(v->arch.flags & TF_kernel_mode);
 #endif
 
diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Jan 11 18:26:03 2006
+++ b/xen/arch/x86/traps.c      Wed Jan 11 18:44:54 2006
@@ -596,7 +596,6 @@
     u16 x;
 #if defined(__x86_64__)
     /* If in user mode, switch to kernel mode just to read I/O bitmap. */
-    extern void toggle_guest_mode(struct vcpu *);
     int user_mode = !(v->arch.flags & TF_kernel_mode);
 #define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
 #elif defined(__i386__)
diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed Jan 11 18:26:03 2006
+++ b/xen/arch/x86/x86_32/traps.c       Wed Jan 11 18:44:54 2006
@@ -157,38 +157,37 @@
         __asm__ __volatile__ ( "hlt" );
 }
 
+static inline void pop_from_guest_stack(
+    void *dst, struct cpu_user_regs *regs, unsigned int bytes)
+{
+    if ( unlikely(copy_from_user(dst, (void __user *)regs->esp, bytes)) )
+        domain_crash_synchronous();
+    regs->esp += bytes;
+}
+
 asmlinkage unsigned long do_iret(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
-    /* Restore EAX (clobbered by hypercall). */
-    if ( copy_from_user(&regs->eax, (void __user *)regs->esp, 4) )
+    /* Pop and restore EAX (clobbered by hypercall). */
+    pop_from_guest_stack(&regs->eax, regs, 4);
+
+    /* Pop and restore EFLAGS, CS and EIP. */
+    pop_from_guest_stack(&regs->eip, regs, 12);
+
+    if ( VM86_MODE(regs) )
+    {
+        /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
+        pop_from_guest_stack(&regs->esp, regs, 24);
+    }
+    else if ( RING_0(regs) )
+    {
         domain_crash_synchronous();
-    regs->esp += 4;
-
-    /* Restore EFLAGS, CS and EIP. */
-    if ( copy_from_user(&regs->eip, (void __user *)regs->esp, 12) )
-        domain_crash_synchronous();
-
-    if ( VM86_MODE(regs) )
-    {
-        /* Return to VM86 mode: restore ESP,SS,ES,DS,FS and GS. */
-        if(copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 24))
-            domain_crash_synchronous();
-    }
-    else if ( RING_0(regs) )
-    {
-        domain_crash_synchronous();
-    }
-    else if ( RING_1(regs) ) {
-        /* Return to ring 1: pop EFLAGS,CS and EIP. */
-        regs->esp += 12;
-    }
-    else
-    {
-        /* Return to ring 2/3: restore ESP and SS. */
-        if ( copy_from_user(&regs->esp, (void __user *)(regs->esp+12), 8) )
-            domain_crash_synchronous();
+    }
+    else if ( !RING_1(regs) )
+    {
+        /* Return to ring 2/3: pop and restore ESP and SS. */
+        pop_from_guest_stack(&regs->esp, regs, 8);
     }
 
     /* Fixup EFLAGS. */
diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Wed Jan 11 18:26:03 2006
+++ b/xen/arch/x86/x86_64/traps.c       Wed Jan 11 18:44:54 2006
@@ -114,7 +114,13 @@
         __asm__ __volatile__ ( "hlt" );
 }
 
-extern void toggle_guest_mode(struct vcpu *);
+void toggle_guest_mode(struct vcpu *v)
+{
+    v->arch.flags ^= TF_kernel_mode;
+    __asm__ __volatile__ ( "swapgs" );
+    update_pagetables(v);
+    write_ptbase(v);
+}
 
 long do_iret(void)
 {
@@ -122,13 +128,17 @@
     struct iret_context iret_saved;
     struct vcpu *v = current;
 
-    if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp, sizeof(iret_saved))) ||
-         unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
-        return -EFAULT;
-
-    /* Returning to user mode. */
-    if ( (iret_saved.cs & 0x03) == 3 )
+    if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
+                                 sizeof(iret_saved))) )
+        domain_crash_synchronous();
+
+    /* Returning to user mode? */
+    if ( (iret_saved.cs & 3) == 3 )
+    {
+        if ( unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
+            return -EFAULT;
         toggle_guest_mode(v);
+    }
 
     regs->rip    = iret_saved.rip;
     regs->cs     = iret_saved.cs | 3; /* force guest privilege */
diff -r c96ea9ebcd29 -r b7e88c83b2a0 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Wed Jan 11 18:26:03 2006
+++ b/xen/include/asm-x86/domain.h      Wed Jan 11 18:44:54 2006
@@ -46,7 +46,10 @@
     struct vcpu_maphash vcpu_maphash[MAX_VIRT_CPUS];
 };
 
-extern void mapcache_init(struct domain *d);
+extern void mapcache_init(struct domain *);
+
+/* x86/64: toggle guest between kernel and user modes. */
+extern void toggle_guest_mode(struct vcpu *);
 
 struct arch_domain
 {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.