
[Xen-changelog] [xen-unstable] Fix vmxassist to boot Vista.



# HG changeset patch
# User Steven Hand <steven@xxxxxxxxxxxxx>
# Node ID 3236311a23a5083ec78cac53ccad1e92d619e8a1
# Parent  ad22c711ccb7c6734e8579f3d13125d467d25b2c
Fix vmxassist to boot Vista.

Currently, guest_linear_to_real() in vmxassist assumes the guest HVM uses
a 2-level page table when entering protected mode with PG enabled. Vista
uses a 3-level (PAE-enabled) page table, so memory addressing goes wrong.
This patch fixes that by handling all 4 possible paging cases.

Signed-off-by: Xiaowei Yang <xiaowei.yang@xxxxxxxxx>
---
 tools/firmware/vmxassist/machine.h |    1 
 tools/firmware/vmxassist/vm86.c    |   87 ++++++++++++++++++++++++++++---------
 2 files changed, 68 insertions(+), 20 deletions(-)
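
For readers skimming the patch: the new walk distinguishes four paging
configurations. The sketch below is standalone illustration, not part of
the patch; the enum and function names are invented here, but the bit
tests mirror the control flow of the patched guest_linear_to_real().

#include <stdint.h>
#include <stdio.h>

#define CR0_PG   (1u << 31)
#define CR4_PSE  (1u << 4)
#define CR4_PAE  (1u << 5)
#define PDE_PS   (1u << 7)   /* page-size bit in a page-directory entry */

/* Invented names; the four cases match the patch's branches. */
enum walk {
	NO_PAGING,       /* CR0.PG clear: linear == physical     */
	WALK_2LVL_4K,    /* non-PAE, 4KB pages: PDE -> PTE       */
	WALK_1LVL_4M,    /* non-PAE, 4MB superpage: PDE only     */
	WALK_3LVL_4K,    /* PAE, 4KB pages: PDPTE -> PDE -> PTE  */
	WALK_2LVL_2M,    /* PAE, 2MB superpage: PDPTE -> PDE     */
};

static enum walk classify(uint32_t cr0, uint32_t cr4, uint64_t pde)
{
	if (!(cr0 & CR0_PG))
		return NO_PAGING;
	if (!(cr4 & CR4_PAE))
		/* Note: the patch accepts either CR4.PSE or PDE.PS as
		 * marking a superpage here. */
		return ((cr4 & CR4_PSE) || (pde & PDE_PS))
			? WALK_1LVL_4M : WALK_2LVL_4K;
	/* In the PAE branch the patch keys off CR4.PSE alone. */
	return (cr4 & CR4_PSE) ? WALK_2LVL_2M : WALK_3LVL_4K;
}

int main(void)
{
	/* Vista's configuration: PAE on, 4KB pages -- the case the
	 * old 2-level-only walk mishandled. */
	printf("%d\n", classify(CR0_PG, CR4_PAE, 0));
	return 0;
}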

diff -r ad22c711ccb7 -r 3236311a23a5 tools/firmware/vmxassist/machine.h
--- a/tools/firmware/vmxassist/machine.h        Fri Sep 22 11:37:31 2006 +0100
+++ b/tools/firmware/vmxassist/machine.h        Fri Sep 22 12:14:22 2006 +0100
@@ -36,6 +36,7 @@
 #define CR4_VME                (1 << 0)
 #define CR4_PVI                (1 << 1)
 #define CR4_PSE                (1 << 4)
+#define CR4_PAE                (1 << 5)
 
 #define EFLAGS_ZF      (1 << 6)
 #define EFLAGS_TF      (1 << 8)
diff -r ad22c711ccb7 -r 3236311a23a5 tools/firmware/vmxassist/vm86.c
--- a/tools/firmware/vmxassist/vm86.c   Fri Sep 22 11:37:31 2006 +0100
+++ b/tools/firmware/vmxassist/vm86.c   Fri Sep 22 12:14:22 2006 +0100
@@ -52,29 +52,74 @@ static char *rnames[] = { "ax", "cx", "d
 static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
 #endif /* DEBUG */
 
+#define PDE_PS           (1 << 7)
 #define PT_ENTRY_PRESENT 0x1
 
+/* We only support access to <=4G physical memory due to 1:1 mapping */
 static unsigned
-guest_linear_to_real(unsigned long base, unsigned off)
-{
-       unsigned int gcr3 = oldctx.cr3;
-       unsigned int l1_mfn;
-       unsigned int l0_mfn;
+guest_linear_to_real(uint32_t base)
+{
+       uint32_t gcr3 = oldctx.cr3;
+       uint64_t l2_mfn;
+       uint64_t l1_mfn;
+       uint64_t l0_mfn;
 
        if (!(oldctx.cr0 & CR0_PG))
-               return base + off;
-
-       l1_mfn = ((unsigned int *)gcr3)[(base >> 22) & 0x3ff ];
-       if (!(l1_mfn & PT_ENTRY_PRESENT))
-               panic("l2 entry not present\n");
-       l1_mfn = l1_mfn & 0xfffff000 ;
-
-       l0_mfn = ((unsigned int *)l1_mfn)[(base >> 12) & 0x3ff];
-       if (!(l0_mfn & PT_ENTRY_PRESENT))
-               panic("l1 entry not present\n");
-       l0_mfn = l0_mfn & 0xfffff000;
-
-       return l0_mfn + off + (base & 0xfff);
+               return base;
+
+       if (!(oldctx.cr4 & CR4_PAE)) {
+               l1_mfn = ((uint32_t *)gcr3)[(base >> 22) & 0x3ff];
+
+               if (oldctx.cr4 & CR4_PSE || l1_mfn & PDE_PS) {
+                        /* 1 level page table */
+                       l0_mfn = l1_mfn;
+                       if (!(l0_mfn & PT_ENTRY_PRESENT))
+                               panic("l1 entry not present\n");
+
+                       l0_mfn &= 0xffc00000;
+                       return l0_mfn + (base & 0x3fffff);
+               }
+
+               if (!(l1_mfn & PT_ENTRY_PRESENT))
+                       panic("l2 entry not present\n");
+
+               l1_mfn &= 0xfffff000;
+               l0_mfn = ((uint32_t *)l1_mfn)[(base >> 12) & 0x3ff];
+               if (!(l0_mfn & PT_ENTRY_PRESENT))
+                       panic("l1 entry not present\n");
+               l0_mfn &= 0xfffff000;
+
+               return l0_mfn + (base & 0xfff);
+       } else if (oldctx.cr4 & CR4_PAE && !(oldctx.cr4 & CR4_PSE)) {
+               l2_mfn = ((uint64_t *)gcr3)[(base >> 30) & 0x3];
+               if (!(l2_mfn & PT_ENTRY_PRESENT))
+                       panic("l3 entry not present\n");
+               l2_mfn &= 0x3fffff000ULL;
+
+               l1_mfn = ((uint64_t *)l2_mfn)[(base >> 21) & 0x1ff];
+               if (!(l1_mfn & PT_ENTRY_PRESENT))
+                       panic("l2 entry not present\n");
+               l1_mfn &= 0x3fffff000ULL;
+
+               l0_mfn = ((uint64_t *)l1_mfn)[(base >> 12) & 0x1ff];
+               if (!(l0_mfn & PT_ENTRY_PRESENT))
+                       panic("l1 entry not present\n");
+               l0_mfn &= 0x3fffff000ULL;
+
+               return l0_mfn + (base & 0xfff);
+       } else { /* oldctx.cr4 & CR4_PAE && oldctx.cr4 & CR4_PSE */
+               l1_mfn = ((uint64_t *)gcr3)[(base >> 30) & 0x3];
+               if (!(l1_mfn & PT_ENTRY_PRESENT))
+                       panic("l2 entry not present\n");
+               l1_mfn &= 0x3fffff000ULL;
+
+               l0_mfn = ((uint64_t *)l1_mfn)[(base >> 21) & 0x1ff];
+               if (!(l0_mfn & PT_ENTRY_PRESENT))
+                       panic("l1 entry not present\n");
+               l0_mfn &= 0x3ffe00000ULL;
+
+               return l0_mfn + (base & 0x1fffff);
+       }
 }
 
 static unsigned
@@ -95,7 +140,8 @@ address(struct regs *regs, unsigned seg,
            (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
                return ((seg & 0xFFFF) << 4) + off;
 
-       entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[seg >> 3];
+       entry = ((unsigned long long *)
+                 guest_linear_to_real(oldctx.gdtr_base))[seg >> 3];
        entry_high = entry >> 32;
        entry_low = entry & 0xFFFFFFFF;
 
@@ -780,7 +826,8 @@ load_seg(unsigned long sel, uint32_t *ba
                return 1;
        }
 
-       entry = ((unsigned long long *) guest_linear_to_real(oldctx.gdtr_base, 0))[sel >> 3];
+       entry = ((unsigned long long *)
+                 guest_linear_to_real(oldctx.gdtr_base))[sel >> 3];
 
        /* Check the P bit first */
        if (!((entry >> (15+32)) & 0x1) && sel != 0)
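
As a quick sanity check on the shifts and masks in the PAE 4KB path
above, here is a standalone sketch, not part of the patch, that
decomposes a 32-bit linear address (the example value is arbitrary)
into its three table indices and page offset:

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x805a3123u;                    /* arbitrary */
	printf("PDPT index: %u\n", (base >> 30) & 0x3);     /* 2 bits    */
	printf("PD index:   %u\n", (base >> 21) & 0x1ff);   /* 9 bits    */
	printf("PT index:   %u\n", (base >> 12) & 0x1ff);   /* 9 bits    */
	printf("offset:     0x%03x\n", base & 0xfff);       /* 12 bits   */
	return 0;
}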

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog