[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [VMXASSIST] Emulate pop %ds and mov reg->{ds, es}.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID ca75b51d69c7591b14ac2cfd59566a1c329b86c7
# Parent  d19deb173503962cc42c235cdeda84d3b4a6a52c
[VMXASSIST] Emulate pop %ds and mov reg->{ds,es}.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/firmware/vmxassist/vm86.c |   57 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 52 insertions(+), 5 deletions(-)

diff -r d19deb173503 -r ca75b51d69c7 tools/firmware/vmxassist/vm86.c
--- a/tools/firmware/vmxassist/vm86.c   Fri Nov 10 15:27:22 2006 +0000
+++ b/tools/firmware/vmxassist/vm86.c   Fri Nov 10 17:21:54 2006 +0000
@@ -813,6 +813,40 @@ pop(struct regs *regs, unsigned prefix, 
        return 1;
 }
 
+static int
+mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
+{
+       unsigned eip = regs->eip - 1;
+       unsigned modrm = fetch8(regs);
+       unsigned addr = operand(prefix, regs, modrm);
+
+       /* Only need to emulate segment loads in real->protected mode. */
+       if (mode != VM86_REAL_TO_PROTECTED)
+               return 0;
+
+       /* Register source only. */
+       if ((modrm & 0xC0) != 0xC0)
+               goto fail;
+
+       switch ((modrm & 0x38) >> 3) {
+       case 3: /* ds */
+               regs->vds = getreg16(regs, modrm);
+               saved_rm_regs.vds = 0;
+               oldctx.ds_sel = regs->vds;
+               return 1;
+       case 0: /* es */
+               regs->ves = getreg16(regs, modrm);
+               saved_rm_regs.ves = 0;
+               oldctx.es_sel = regs->ves;
+               return 1;
+       }
+
+ fail:
+       printf("%s:%d: missed opcode %02x %02x\n",
+              __FUNCTION__, __LINE__, opc, modrm);
+       return 0;
+}
+
 /*
  * Emulate a segment load in protected mode
  */
@@ -1257,11 +1291,9 @@ opcode(struct regs *regs)
 
        for (;;) {
                switch ((opc = fetch8(regs))) {
-               case 0x07:
-                       if (prefix & DATA32)
-                               regs->ves = pop32(regs);
-                       else
-                               regs->ves = pop16(regs);
+               case 0x07: /* pop %es */
+                       regs->ves = (prefix & DATA32) ?
+                               pop32(regs) : pop16(regs);
                        TRACE((regs, regs->eip - eip, "pop %%es"));
                        if (mode == VM86_REAL_TO_PROTECTED) {
                                saved_rm_regs.ves = 0;
@@ -1316,6 +1348,16 @@ opcode(struct regs *regs)
                        }
                        goto invalid;
 
+               case 0x1F: /* pop %ds */
+                       regs->vds = (prefix & DATA32) ?
+                               pop32(regs) : pop16(regs);
+                       TRACE((regs, regs->eip - eip, "pop %%ds"));
+                       if (mode == VM86_REAL_TO_PROTECTED) {
+                               saved_rm_regs.vds = 0;
+                               oldctx.ds_sel = regs->vds;
+                       }
+                       return OPC_EMULATED;
+
                case 0x26:
                        TRACE((regs, regs->eip - eip, "%%es:"));
                        prefix |= SEG_ES;
@@ -1402,6 +1444,11 @@ opcode(struct regs *regs)
                                 goto invalid;
                         return OPC_EMULATED;
 
+               case 0x8E: /* mov r16, sreg */
+                       if (!mov_to_seg(regs, prefix, opc))
+                               goto invalid;
+                       return OPC_EMULATED;
+
                case 0x8F: /* addr32 pop r/m16 */
                         if ((prefix & ADDR32) == 0)
                                 goto invalid;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.