
[Xen-changelog] [xen-unstable] x86: Clean up arch_set_info_guest() by having HVM VCPUs bail early.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191248898 -3600
# Node ID 22273a5336e50ca38198f4d2ab6b20b27a856221
# Parent  772674585a1a367095fe326b8a659f674099a241
x86: Clean up arch_set_info_guest() by having HVM VCPUs bail early.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c |  156 ++++++++++++++++++++++++--------------------------
 1 files changed, 77 insertions(+), 79 deletions(-)

diff -r 772674585a1a -r 22273a5336e5 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Oct 01 15:12:05 2007 +0100
+++ b/xen/arch/x86/domain.c     Mon Oct 01 15:28:18 2007 +0100
@@ -645,21 +645,21 @@ int arch_set_info_guest(
 
     v->arch.guest_context.user_regs.eflags |= 2;
 
+    if ( is_hvm_vcpu(v) )
+        goto out;
+
     /* Only CR0.TS is modifiable by guest or admin. */
     v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
     v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
 
     init_int80_direct_trap(v);
 
-    if ( !is_hvm_vcpu(v) )
-    {
-        /* IOPL privileges are virtualised. */
-        v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
-        v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
-
-        /* Ensure real hardware interrupts are enabled. */
-        v->arch.guest_context.user_regs.eflags |= EF_IE;
-    }
+    /* IOPL privileges are virtualised. */
+    v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
+    v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
+
+    /* Ensure real hardware interrupts are enabled. */
+    v->arch.guest_context.user_regs.eflags |= EF_IE;
 
     if ( v->is_initialised )
         goto out;
@@ -672,29 +672,44 @@ int arch_set_info_guest(
     if ( v->vcpu_id == 0 )
         d->vm_assist = c(vm_assist);
 
-    if ( !is_hvm_vcpu(v) )
-    {
-        if ( !compat )
-            rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
+    if ( !compat )
+        rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
 #ifdef CONFIG_COMPAT
-        else
-        {
-            unsigned long gdt_frames[ARRAY_SIZE(c.cmp->gdt_frames)];
-            unsigned int i, n = (c.cmp->gdt_ents + 511) / 512;
-
-            if ( n > ARRAY_SIZE(c.cmp->gdt_frames) )
-                return -EINVAL;
-            for ( i = 0; i < n; ++i )
-                gdt_frames[i] = c.cmp->gdt_frames[i];
-            rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
-        }
-#endif
-        if ( rc != 0 )
-            return rc;
-
-        if ( !compat )
-        {
-            cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
+    else
+    {
+        unsigned long gdt_frames[ARRAY_SIZE(c.cmp->gdt_frames)];
+        unsigned int i, n = (c.cmp->gdt_ents + 511) / 512;
+
+        if ( n > ARRAY_SIZE(c.cmp->gdt_frames) )
+            return -EINVAL;
+        for ( i = 0; i < n; ++i )
+            gdt_frames[i] = c.cmp->gdt_frames[i];
+        rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
+    }
+#endif
+    if ( rc != 0 )
+        return rc;
+
+    if ( !compat )
+    {
+        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));
+
+        if ( !mfn_valid(cr3_pfn) ||
+             (paging_mode_refcounts(d)
+              ? !get_page(mfn_to_page(cr3_pfn), d)
+              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+                                   PGT_base_page_table)) )
+        {
+            destroy_gdt(v);
+            return -EINVAL;
+        }
+
+        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
+
+#ifdef __x86_64__
+        if ( c.nat->ctrlreg[1] )
+        {
+            cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
 
             if ( !mfn_valid(cr3_pfn) ||
                  (paging_mode_refcounts(d)
@@ -702,59 +717,42 @@ int arch_set_info_guest(
                   : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                        PGT_base_page_table)) )
             {
+                cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
+                v->arch.guest_table = pagetable_null();
+                if ( paging_mode_refcounts(d) )
+                    put_page(mfn_to_page(cr3_pfn));
+                else
+                    put_page_and_type(mfn_to_page(cr3_pfn));
                 destroy_gdt(v);
                 return -EINVAL;
             }
 
-            v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
-
-#ifdef __x86_64__
-            if ( c.nat->ctrlreg[1] )
-            {
-                cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));
-
-                if ( !mfn_valid(cr3_pfn) ||
-                     (paging_mode_refcounts(d)
-                      ? !get_page(mfn_to_page(cr3_pfn), d)
-                      : !get_page_and_type(mfn_to_page(cr3_pfn), d,
-                                           PGT_base_page_table)) )
-                {
-                    cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
-                    v->arch.guest_table = pagetable_null();
-                    if ( paging_mode_refcounts(d) )
-                        put_page(mfn_to_page(cr3_pfn));
-                    else
-                        put_page_and_type(mfn_to_page(cr3_pfn));
-                    destroy_gdt(v);
-                    return -EINVAL;
-                }
-
-                v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
-            }
-#endif
-        }
+            v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
+        }
+#endif
+    }
 #ifdef CONFIG_COMPAT
-        else
-        {
-            l4_pgentry_t *l4tab;
-
-            cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
-
-            if ( !mfn_valid(cr3_pfn) ||
-                 (paging_mode_refcounts(d)
-                  ? !get_page(mfn_to_page(cr3_pfn), d)
-                  : !get_page_and_type(mfn_to_page(cr3_pfn), d,
-                                       PGT_l3_page_table)) )
-            {
-                destroy_gdt(v);
-                return -EINVAL;
-            }
-
-            l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
-            *l4tab = l4e_from_pfn(cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
-        }
-#endif
-    }    
+    else
+    {
+        l4_pgentry_t *l4tab;
+
+        cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
+
+        if ( !mfn_valid(cr3_pfn) ||
+             (paging_mode_refcounts(d)
+              ? !get_page(mfn_to_page(cr3_pfn), d)
+              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
+                                   PGT_l3_page_table)) )
+        {
+            destroy_gdt(v);
+            return -EINVAL;
+        }
+
+        l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+        *l4tab = l4e_from_pfn(
+            cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
+    }
+#endif
 
     if ( v->vcpu_id == 0 )
         update_domain_wallclock_time(d);
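
For readers skimming the diff: the change is structural. Instead of wrapping the PV-only parts of arch_set_info_guest() in "if ( !is_hvm_vcpu(v) )" blocks, the function now jumps to its "out" label as soon as it sees an HVM VCPU, so the PV-only code drops an indentation level. Below is a minimal, self-contained sketch of that pattern; vcpu_t, its fields, and the helper functions are illustrative stand-ins, not the real Xen definitions.

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool is_hvm; int iopl; } vcpu_t;

static bool is_hvm_vcpu(const vcpu_t *v) { return v->is_hvm; }

/* Before: PV-only work nested one level deep inside "if ( !is_hvm_vcpu(v) )". */
static int set_info_nested(vcpu_t *v)
{
    if ( !is_hvm_vcpu(v) )
    {
        /* PV-only setup lives inside the conditional. */
        v->iopl = 1;
    }
    return 0;
}

/* After: HVM VCPUs bail at the top, so PV-only code runs unindented. */
static int set_info_early_bail(vcpu_t *v)
{
    int rc = 0;

    if ( is_hvm_vcpu(v) )
        goto out;          /* nothing below applies to HVM VCPUs */

    /* PV-only setup now sits at the top indentation level. */
    v->iopl = 1;

 out:
    return rc;
}

int main(void)
{
    vcpu_t pv = { .is_hvm = false }, hvm = { .is_hvm = true };
    printf("pv=%d hvm=%d\n", set_info_nested(&pv), set_info_early_bail(&hvm));
    return 0;
}

As the first hunk shows, the CR0.TS fixup and the init_int80_direct_trap() call now also sit below the new bail-out, so HVM VCPUs no longer run them.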
