[Xen-changelog] [PATCH] vmx-worldswitch-1-to-1.patch



ChangeSet 1.1577, 2005/05/28 09:53:55+01:00, arun.sharma@xxxxxxxxx

        [PATCH] vmx-worldswitch-1-to-1.patch
        
        Fix VMX world switch to use 1:1 page tables when the guest has paging
        disabled. Also do a printk instead of VMX_DBG_LOG() anytime we crash
        a domain.
        
        Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
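
Background for the hunks below: with guest paging disabled, a guest
"virtual" address is already a guest-physical address, so the
gva_to_gpte()/gva_to_gpa() walks are unnecessary and the translation
collapses to a single phys-to-machine lookup. A sketch of that one-step
translation (the helper name is hypothetical; the two calls mirror the
vmx.c and vmx_platform.c hunks below):

    /* Guest paging off: gva == gpa, so go straight to the machine frame. */
    static void *map_guest_addr_nopaging(unsigned long laddr)
    {
        unsigned long mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
        return map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
    }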



 tools/libxc/xc_vmx_build.c     |    2 -
 xen/arch/x86/domain.c          |    1 
 xen/arch/x86/vmx.c             |   58 ++++++++++++++++++++---------------------
 xen/arch/x86/vmx_io.c          |    2 -
 xen/arch/x86/vmx_platform.c    |   10 ++++---
 xen/arch/x86/vmx_vmcs.c        |    2 -
 xen/arch/x86/x86_32/traps.c    |    8 ++++-
 xen/include/asm-x86/shadow.h   |    7 +++-
 xen/include/asm-x86/vmx.h      |   11 +++++++
 xen/include/asm-x86/vmx_vmcs.h |    1 
 10 files changed, 61 insertions(+), 41 deletions(-)
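
The hunks rely on a new vmx_paging_enabled() helper added to
xen/include/asm-x86/vmx.h (that hunk is not included in this truncated
posting). A minimal sketch of such a helper, assuming it derives the
answer from the guest's CR0 read shadow rather than from the
VMX_CPU_STATE_PG_ENABLED bit this patch removes:

    static inline int vmx_paging_enabled(struct exec_domain *d)
    {
        unsigned long cr0;

        /* The read shadow holds the CR0 value the guest believes it set. */
        __vmread(CR0_READ_SHADOW, &cr0);
        return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
    }

Because vmx_set_cr0() now writes CR0_READ_SHADOW itself (see the vmx.c
hunks), the shadow is always current and no separate state flag is
needed.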


diff -Nru a/tools/libxc/xc_vmx_build.c b/tools/libxc/xc_vmx_build.c
--- a/tools/libxc/xc_vmx_build.c        2005-05-28 05:03:50 -04:00
+++ b/tools/libxc/xc_vmx_build.c        2005-05-28 05:03:50 -04:00
@@ -10,7 +10,7 @@
 #include <zlib.h>
 #include "linux_boot_params.h"
 
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
 
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     2005-05-28 05:03:50 -04:00
+++ b/xen/arch/x86/domain.c     2005-05-28 05:03:50 -04:00
@@ -339,7 +339,6 @@
     }
 
     ed->arch.schedule_tail = arch_vmx_do_launch;
-    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
 
 #if defined (__i386)
     ed->arch.arch_vmx.vmx_platform.real_mode_data = 
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        2005-05-28 05:03:50 -04:00
+++ b/xen/arch/x86/vmx.c        2005-05-28 05:03:50 -04:00
@@ -122,7 +122,6 @@
 
 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
 {
-    struct exec_domain *ed = current;
     unsigned long eip;
     l1_pgentry_t gpte;
     unsigned long gpa; /* FIXME: PAE */
@@ -137,15 +136,8 @@
     }
 #endif
 
-    /*
-     * If vpagetable is zero, then we are still emulating 1:1 page tables,
-     * and we should have never gotten here.
-     */
-    if ( !test_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state) )
-    {
-        printk("vmx_do_page_fault while running on 1:1 page table\n");
-        return 0;
-    }
+    if (!vmx_paging_enabled(current))
+        handle_mmio(va, va);
 
     gpte = gva_to_gpte(va);
     if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
@@ -399,7 +391,7 @@
 
     vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
     if (vio == 0) {
-        VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx", (unsigned long) vio);
+        printk("bad shared page: %lx", (unsigned long) vio);
         domain_crash_synchronous(); 
     }
     p = &vio->vp_ioreq;
@@ -423,7 +415,10 @@
             laddr = (p->dir == IOREQ_WRITE) ? regs->esi : regs->edi;
         }
         p->pdata_valid = 1;
-        p->u.pdata = (void *) gva_to_gpa(laddr);
+
+        p->u.data = laddr;
+        if (vmx_paging_enabled(d))
+                p->u.pdata = (void *) gva_to_gpa(p->u.data);
         p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
 
         if (test_bit(5, &exit_qualification)) /* "rep" prefix */
@@ -481,7 +476,7 @@
        return 0;
     }
 
-    mfn = phys_to_machine_mapping(l1e_get_pfn(gva_to_gpte(laddr)));
+    mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT);
     addr = map_domain_mem((mfn << PAGE_SHIFT) | (laddr & ~PAGE_MASK));
 
     if (dir == COPY_IN)
@@ -570,6 +565,12 @@
 
     error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
 
+    if (!vmx_paging_enabled(d)) {
+       VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
+       __vmwrite(GUEST_CR3, pagetable_val(d->domain->arch.phys_table));
+        goto skip_cr3;
+    }
+
     if (c->cr3 == d->arch.arch_vmx.cpu_cr3) {
        /* 
         * This is simple TLB flush, implying the guest has 
@@ -578,7 +579,7 @@
         */
        mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT);
        if ((mfn << PAGE_SHIFT) != pagetable_val(d->arch.guest_table)) {
-           VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
+           printk("Invalid CR3 value=%lx", c->cr3);
            domain_crash_synchronous();
            return 0;
        }
@@ -590,7 +591,7 @@
         */
        VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %lx", c->cr3);
        if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) {
-           VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value=%lx", c->cr3);
+           printk("Invalid CR3 value=%lx", c->cr3);
            domain_crash_synchronous(); 
            return 0;
        }
@@ -605,6 +606,8 @@
        __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
     }
 
+skip_cr3:
+
     error |= __vmread(CR4_READ_SHADOW, &old_cr4);
     error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
     error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
@@ -731,18 +734,18 @@
     struct exec_domain *d = current;
     unsigned long old_base_mfn, mfn;
     unsigned long eip;
+    int paging_enabled;
 
     /* 
      * CR0: We don't want to lose PE and PG.
      */
+    paging_enabled = vmx_paging_enabled(d);
     __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
+    __vmwrite(CR0_READ_SHADOW, value);
 
-    if (value & (X86_CR0_PE | X86_CR0_PG) &&
-        !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
-        /*
-         * Enable paging
-         */
-        set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
+    VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
+    if ((value & X86_CR0_PE) && (value & X86_CR0_PG) 
+        && !paging_enabled) {
         /*
          * The guest CR3 must be pointing to the guest physical.
          */
@@ -750,8 +753,7 @@
                             d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
              !get_page(pfn_to_page(mfn), d->domain) )
         {
-            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
-                        d->arch.arch_vmx.cpu_cr3);
+            printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3);
             domain_crash_synchronous(); /* need to take a clean path */
         }
         old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
@@ -776,8 +778,7 @@
     } else {
         if ((value & X86_CR0_PE) == 0) {
             __vmread(GUEST_EIP, &eip);
-            VMX_DBG_LOG(DBG_LEVEL_1,
-               "Disabling CR0.PE at %%eip 0x%lx", eip);
+            VMX_DBG_LOG(DBG_LEVEL_1, "Disabling CR0.PE at %%eip 0x%lx\n", eip);
            if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
                set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
                                        &d->arch.arch_vmx.cpu_state);
@@ -838,7 +839,6 @@
     switch(cr) {
     case 0: 
     {
-       __vmwrite(CR0_READ_SHADOW, value);
        return vmx_set_cr0(value);
     }
     case 3: 
@@ -848,7 +848,7 @@
         /*
          * If paging is not enabled yet, simply copy the value to CR3.
          */
-        if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
+        if (!vmx_paging_enabled(d)) {
             d->arch.arch_vmx.cpu_cr3 = value;
             break;
         }
@@ -876,8 +876,7 @@
                 !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) ||
                  !get_page(pfn_to_page(mfn), d->domain) )
             {
-                VMX_DBG_LOG(DBG_LEVEL_VMMU, 
-                        "Invalid CR3 value=%lx", value);
+                printk("Invalid CR3 value=%lx", value);
                 domain_crash_synchronous(); /* need to take a clean path */
             }
             old_base_mfn = pagetable_get_pfn(d->arch.guest_table);
@@ -1133,6 +1132,7 @@
         VMX_DBG_LOG(DBG_LEVEL_0, "exit reason = %x", exit_reason);
 
     if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+        printk("Failed vm entry\n");
         domain_crash_synchronous();         
         return;
     }
diff -Nru a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     2005-05-28 05:03:50 -04:00
+++ b/xen/arch/x86/vmx_io.c     2005-05-28 05:03:50 -04:00
@@ -465,7 +465,7 @@
 void vmx_do_resume(struct exec_domain *d) 
 {
     vmx_stts();
-    if ( test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state) )
+    if ( vmx_paging_enabled(d) )
         __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
     else
         // paging is not enabled in the guest
diff -Nru a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c
--- a/xen/arch/x86/vmx_platform.c       2005-05-28 05:03:50 -04:00
+++ b/xen/arch/x86/vmx_platform.c       2005-05-28 05:03:50 -04:00
@@ -418,8 +418,12 @@
     }
 
     if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
-        gpte = gva_to_gpte(guest_eip);
-        mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
+        if (vmx_paging_enabled(current)) {
+                gpte = gva_to_gpte(guest_eip);
+                mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
+        } else {
+                mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT);
+        }
         ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
         inst_start = (unsigned char *)map_domain_mem(ma);
                 
@@ -508,7 +512,7 @@
     } else
         p->count = 1;
 
-    if (pvalid)
+    if ((pvalid) && vmx_paging_enabled(current))
         p->u.pdata = (void *) gva_to_gpa(p->u.data);
 
 #if 0
diff -Nru a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   2005-05-28 05:03:50 -04:00
+++ b/xen/arch/x86/vmx_vmcs.c   2005-05-28 05:03:50 -04:00
@@ -291,7 +291,7 @@
 
     /* Initally PG, PE are not set*/
     shadow_cr = host_env->cr0;
-    shadow_cr &= ~(X86_CR0_PE | X86_CR0_PG);
+    shadow_cr &= ~X86_CR0_PG;
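
Taken together, the world switch now selects the guest CR3 in one place
at resume time. A condensed sketch assembled from the vmx_io.c and
vmx.c hunks above (phys_table is the domain's pre-built 1:1 page
table):

    void vmx_do_resume(struct exec_domain *d)
    {
        vmx_stts();
        if ( vmx_paging_enabled(d) )
            /* Guest paging on: run on the shadow of the guest tables. */
            __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
        else
            /* Guest paging off: run on the 1:1 phys table. */
            __vmwrite(GUEST_CR3, pagetable_val(d->domain->arch.phys_table));
    }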
