
[Xen-changelog] [xen-unstable] x86_emulate: read/write/insn_fetch emulation hooks now all take a pointer to an emulator data buffer and an arbitrary byte count



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1214831949 -3600
# Node ID c33a40b4c22bfdd1cdc808bda2bd4d19f48ee4dc
# Parent  51b392ab1912e00d11fe5373ddf8dd2c1d2ae612
x86_emulate: read/write/insn_fetch emulation hooks now all take a
pointer to an emulator data buffer and an arbitrary byte count (up to
the size of a page of memory).

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 tools/tests/test_x86_emulator.c        |    9 -
 xen/arch/x86/hvm/emulate.c             |  113 +++++++++---------
 xen/arch/x86/mm.c                      |   29 +++-
 xen/arch/x86/mm/shadow/common.c        |   32 ++---
 xen/arch/x86/x86_emulate/x86_emulate.c |  201 +++++++++++++++++----------------
 xen/arch/x86/x86_emulate/x86_emulate.h |   37 +++---
 6 files changed, 232 insertions(+), 189 deletions(-)
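
For illustration only, here is a minimal hook pair written against the new
interface, following the updated signatures in x86_emulate.h below: each
hook now receives the emulator's data buffer (p_data) and a byte count
directly, instead of marshalling values through an unsigned long.  This is
a sketch, not part of the changeset -- flat_read/flat_write are invented
names, the flat-address-space behaviour mirrors the test harness, and it
assumes the Xen emulator header for enum x86_segment, struct
x86_emulate_ctxt and X86EMUL_OKAY.

#include <string.h>         /* memcpy(); Xen code pulls in its own headers */
#include "x86_emulate.h"    /* assumed include path for the emulator types */

static int flat_read(
    enum x86_segment seg,
    unsigned long offset,            /* treated as a flat virtual address */
    void *p_data,                    /* destination buffer, owned by the emulator */
    unsigned int bytes,              /* may now exceed sizeof(long), up to a page */
    struct x86_emulate_ctxt *ctxt)
{
    memcpy(p_data, (void *)offset, bytes);
    return X86EMUL_OKAY;
}

static int flat_write(
    enum x86_segment seg,
    unsigned long offset,
    void *p_data,                    /* source buffer, owned by the emulator */
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    memcpy((void *)offset, p_data, bytes);
    return X86EMUL_OKAY;
}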

diff -r 51b392ab1912 -r c33a40b4c22b tools/tests/test_x86_emulator.c
--- a/tools/tests/test_x86_emulator.c   Mon Jun 30 11:39:10 2008 +0100
+++ b/tools/tests/test_x86_emulator.c   Mon Jun 30 14:19:09 2008 +0100
@@ -22,23 +22,22 @@ static int read(
 static int read(
     unsigned int seg,
     unsigned long offset,
-    unsigned long *val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
-    *val = 0;
-    memcpy(val, (void *)offset, bytes);
+    memcpy(p_data, (void *)offset, bytes);
     return X86EMUL_OKAY;
 }
 
 static int write(
     unsigned int seg,
     unsigned long offset,
-    unsigned long val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
-    memcpy((void *)offset, &val, bytes);
+    memcpy((void *)offset, p_data, bytes);
     return X86EMUL_OKAY;
 }
 
diff -r 51b392ab1912 -r c33a40b4c22b xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Mon Jun 30 11:39:10 2008 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Mon Jun 30 14:19:09 2008 +0100
@@ -21,15 +21,33 @@
 
 static int hvmemul_do_io(
     int is_mmio, paddr_t addr, unsigned long *reps, int size,
-    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
-{
+    paddr_t ram_gpa, int dir, int df, void *p_data)
+{
+    paddr_t value = ram_gpa;
+    int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
     vcpu_iodata_t *vio = get_ioreq(curr);
     ioreq_t *p = &vio->vp_ioreq;
     int rc;
 
-    /* Only retrieve the value from singleton (non-REP) reads. */
-    ASSERT((val == NULL) || ((dir == IOREQ_READ) && !value_is_ptr));
+    /*
+     * Weird-sized accesses have undefined behaviour: we discard writes
+     * and read all-ones.
+     */
+    if ( unlikely((size > sizeof(long)) || (size & (size - 1))) )
+    {
+        gdprintk(XENLOG_WARNING, "bad mmio size %d\n", size);
+        ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
+        if ( dir == IOREQ_READ )
+            memset(p_data, ~0, size);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    if ( (p_data != NULL) && (dir == IOREQ_WRITE) )
+    {
+        memcpy(&value, p_data, size);
+        p_data = NULL;
+    }
 
     if ( is_mmio && !value_is_ptr )
     {
@@ -47,8 +65,7 @@ static int hvmemul_do_io(
             unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
             {
-                *val = 0;
-                memcpy(val, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
+                memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
                        size);
                 return X86EMUL_OKAY;
             }
@@ -61,7 +78,7 @@ static int hvmemul_do_io(
         break;
     case HVMIO_completed:
         curr->arch.hvm_vcpu.io_state = HVMIO_none;
-        if ( val == NULL )
+        if ( p_data == NULL )
             return X86EMUL_UNHANDLEABLE;
         goto finish_access;
     case HVMIO_dispatched:
@@ -82,7 +99,7 @@ static int hvmemul_do_io(
     }
 
     curr->arch.hvm_vcpu.io_state =
-        (val == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
+        (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
 
     p->dir = dir;
     p->data_is_ptr = value_is_ptr;
@@ -116,7 +133,7 @@ static int hvmemul_do_io(
         break;
     case X86EMUL_UNHANDLEABLE:
         hvm_send_assist_req(curr);
-        rc = (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
+        rc = (p_data != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
         break;
     default:
         BUG();
@@ -126,8 +143,8 @@ static int hvmemul_do_io(
         return rc;
 
  finish_access:
-    if ( val != NULL )
-        *val = curr->arch.hvm_vcpu.io_data;
+    if ( p_data != NULL )
+        memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
 
     if ( is_mmio && !value_is_ptr )
     {
@@ -152,7 +169,7 @@ static int hvmemul_do_io(
                   sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
             {
                 memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
-                       val, size);
+                       p_data, size);
                 curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
             }
         }
@@ -163,18 +180,16 @@ static int hvmemul_do_io(
 
 static int hvmemul_do_pio(
     unsigned long port, unsigned long *reps, int size,
-    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
-{
-    return hvmemul_do_io(0, port, reps, size, value,
-                         dir, df, value_is_ptr, val);
+    paddr_t ram_gpa, int dir, int df, void *p_data)
+{
+    return hvmemul_do_io(0, port, reps, size, ram_gpa, dir, df, p_data);
 }
 
 static int hvmemul_do_mmio(
     paddr_t gpa, unsigned long *reps, int size,
-    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
-{
-    return hvmemul_do_io(1, gpa, reps, size, value,
-                         dir, df, value_is_ptr, val);
+    paddr_t ram_gpa, int dir, int df, void *p_data)
+{
+    return hvmemul_do_io(1, gpa, reps, size, ram_gpa, dir, df, p_data);
 }
 
 /*
@@ -287,7 +302,7 @@ static int __hvmemul_read(
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long *val,
+    void *p_data,
     unsigned int bytes,
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
@@ -302,8 +317,6 @@ static int __hvmemul_read(
         seg, offset, bytes, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-
-    *val = 0;
 
     if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
          curr->arch.hvm_vcpu.mmio_gva )
@@ -314,7 +327,7 @@ static int __hvmemul_read(
         gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
             return hvmemul_do_mmio(gpa, &reps, bytes, 0,
-                                   IOREQ_READ, 0, 0, val);
+                                   IOREQ_READ, 0, p_data);
     }
 
     if ( (seg != x86_seg_none) &&
@@ -322,15 +335,13 @@ static int __hvmemul_read(
         pfec |= PFEC_user_mode;
 
     rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_virt(val, addr, bytes, pfec) :
-          hvm_copy_from_guest_virt(val, addr, bytes, pfec));
+          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec) :
+          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec));
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
 
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
-        unsigned long reps = 1;
-
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
 
@@ -339,7 +350,7 @@ static int __hvmemul_read(
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
+        return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
     }
 
     return X86EMUL_OKAY;
@@ -348,19 +359,19 @@ static int hvmemul_read(
 static int hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long *val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
     return __hvmemul_read(
-        seg, offset, val, bytes, hvm_access_read,
+        seg, offset, p_data, bytes, hvm_access_read,
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
 }
 
 static int hvmemul_insn_fetch(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long *val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
@@ -371,19 +382,18 @@ static int hvmemul_insn_fetch(
     /* Fall back if requested bytes are not in the prefetch cache. */
     if ( unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
         return __hvmemul_read(
-            seg, offset, val, bytes,
+            seg, offset, p_data, bytes,
             hvm_access_insn_fetch, hvmemul_ctxt);
 
     /* Hit the cache. Simple memcpy. */
-    *val = 0;
-    memcpy(val, &hvmemul_ctxt->insn_buf[insn_off], bytes);
+    memcpy(p_data, &hvmemul_ctxt->insn_buf[insn_off], bytes);
     return X86EMUL_OKAY;
 }
 
 static int hvmemul_write(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
@@ -406,29 +416,27 @@ static int hvmemul_write(
         unsigned int off = addr & (PAGE_SIZE - 1);
         gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
-            return hvmemul_do_mmio(gpa, &reps, bytes, val,
-                                   IOREQ_WRITE, 0, 0, NULL);
+            return hvmemul_do_mmio(gpa, &reps, bytes, 0,
+                                   IOREQ_WRITE, 0, p_data);
     }
 
     if ( (seg != x86_seg_none) &&
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
         pfec |= PFEC_user_mode;
 
-    rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec);
+    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
 
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
-        unsigned long reps = 1;
-
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        return hvmemul_do_mmio(gpa, &reps, bytes, val,
-                               IOREQ_WRITE, 0, 0, NULL);
+        return hvmemul_do_mmio(gpa, &reps, bytes, 0,
+                               IOREQ_WRITE, 0, p_data);
     }
 
     return X86EMUL_OKAY;
@@ -442,12 +450,8 @@ static int hvmemul_cmpxchg(
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
-    unsigned long new = 0;
-    if ( bytes > sizeof(new) )
-        return X86EMUL_UNHANDLEABLE;
-    memcpy(&new, p_new, bytes);
     /* Fix this in case the guest is really relying on r-m-w atomicity. */
-    return hvmemul_write(seg, offset, new, bytes, ctxt);
+    return hvmemul_write(seg, offset, p_new, bytes, ctxt);
 }
 
 static int hvmemul_rep_ins(
@@ -480,7 +484,7 @@ static int hvmemul_rep_ins(
         return rc;
 
     return hvmemul_do_pio(src_port, reps, bytes_per_rep, gpa, IOREQ_READ,
-                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
 }
 
 static int hvmemul_rep_outs(
@@ -513,7 +517,7 @@ static int hvmemul_rep_outs(
         return rc;
 
     return hvmemul_do_pio(dst_port, reps, bytes_per_rep, gpa, IOREQ_WRITE,
-                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
 }
 
 static int hvmemul_rep_movs(
@@ -563,14 +567,14 @@ static int hvmemul_rep_movs(
     if ( !p2m_is_ram(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ,
-            !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+            !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
 
     (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
     if ( p2m_is_ram(p2mt) )
         return X86EMUL_UNHANDLEABLE;
     return hvmemul_do_mmio(
         dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE,
-        !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+        !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
 }
 
 static int hvmemul_read_segment(
@@ -607,7 +611,8 @@ static int hvmemul_read_io(
     struct x86_emulate_ctxt *ctxt)
 {
     unsigned long reps = 1;
-    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, 0, val);
+    *val = 0;
+    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, val);
 }
 
 static int hvmemul_write_io(
@@ -617,7 +622,7 @@ static int hvmemul_write_io(
     struct x86_emulate_ctxt *ctxt)
 {
     unsigned long reps = 1;
-    return hvmemul_do_pio(port, &reps, bytes, val, IOREQ_WRITE, 0, 0, NULL);
+    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_WRITE, 0, &val);
 }
 
 static int hvmemul_read_cr(
diff -r 51b392ab1912 -r c33a40b4c22b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Jun 30 11:39:10 2008 +0100
+++ b/xen/arch/x86/mm.c Mon Jun 30 14:19:09 2008 +0100
@@ -3539,15 +3539,14 @@ static int ptwr_emulated_read(
 static int ptwr_emulated_read(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long *val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
     unsigned int rc;
     unsigned long addr = offset;
 
-    *val = 0;
-    if ( (rc = copy_from_user((void *)val, (void *)addr, bytes)) != 0 )
+    if ( (rc = copy_from_user(p_data, (void *)addr, bytes)) != 0 )
     {
         propagate_page_fault(addr + bytes - rc, 0); /* read fault */
         return X86EMUL_EXCEPTION;
@@ -3574,7 +3573,7 @@ static int ptwr_emulated_update(
     /* Only allow naturally-aligned stores within the original %cr2 page. */
     if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
     {
-        MEM_LOG("Bad ptwr access (cr2=%lx, addr=%lx, bytes=%u)",
+        MEM_LOG("ptwr_emulate: bad access (cr2=%lx, addr=%lx, bytes=%u)",
                 ptwr_ctxt->cr2, addr, bytes);
         return X86EMUL_UNHANDLEABLE;
     }
@@ -3682,10 +3681,21 @@ static int ptwr_emulated_write(
 static int ptwr_emulated_write(
     enum x86_segment seg,
     unsigned long offset,
-    unsigned long val,
+    void *p_data,
     unsigned int bytes,
     struct x86_emulate_ctxt *ctxt)
 {
+    paddr_t val = 0;
+
+    if ( (bytes > sizeof(paddr_t)) || (bytes & (bytes -1)) )
+    {
+        MEM_LOG("ptwr_emulate: bad write size (addr=%lx, bytes=%u)",
+                offset, bytes);
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    memcpy(&val, p_data, bytes);
+
     return ptwr_emulated_update(
         offset, 0, val, bytes, 0,
         container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
@@ -3700,10 +3710,17 @@ static int ptwr_emulated_cmpxchg(
     struct x86_emulate_ctxt *ctxt)
 {
     paddr_t old = 0, new = 0;
-    if ( bytes > sizeof(paddr_t) )
+
+    if ( (bytes > sizeof(paddr_t)) || (bytes & (bytes -1)) )
+    {
+        MEM_LOG("ptwr_emulate: bad cmpxchg size (addr=%lx, bytes=%u)",
+                offset, bytes);
         return X86EMUL_UNHANDLEABLE;
+    }
+
     memcpy(&old, p_old, bytes);
     memcpy(&new, p_new, bytes);
+
     return ptwr_emulated_update(
         offset, old, new, bytes, 1,
         container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
diff -r 51b392ab1912 -r c33a40b4c22b xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Jun 30 11:39:10 2008 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Mon Jun 30 14:19:09 2008 +0100
@@ -145,7 +145,7 @@ static int
 static int
 hvm_read(enum x86_segment seg,
          unsigned long offset,
-         unsigned long *val,
+         void *p_data,
          unsigned int bytes,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
@@ -158,12 +158,10 @@ hvm_read(enum x86_segment seg,
     if ( rc )
         return rc;
 
-    *val = 0;
-
     if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_virt(val, addr, bytes, 0);
+        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0);
     else
-        rc = hvm_copy_from_guest_virt(val, addr, bytes, 0);
+        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0);
 
     switch ( rc )
     {
@@ -181,20 +179,20 @@ static int
 static int
 hvm_emulate_read(enum x86_segment seg,
                  unsigned long offset,
-                 unsigned long *val,
+                 void *p_data,
                  unsigned int bytes,
                  struct x86_emulate_ctxt *ctxt)
 {
     if ( !is_x86_user_segment(seg) )
         return X86EMUL_UNHANDLEABLE;
-    return hvm_read(seg, offset, val, bytes, hvm_access_read,
+    return hvm_read(seg, offset, p_data, bytes, hvm_access_read,
                     container_of(ctxt, struct sh_emulate_ctxt, ctxt));
 }
 
 static int
 hvm_emulate_insn_fetch(enum x86_segment seg,
                        unsigned long offset,
-                       unsigned long *val,
+                       void *p_data,
                        unsigned int bytes,
                        struct x86_emulate_ctxt *ctxt)
 {
@@ -206,19 +204,18 @@ hvm_emulate_insn_fetch(enum x86_segment 
 
     /* Fall back if requested bytes are not in the prefetch cache. */
     if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
-        return hvm_read(seg, offset, val, bytes,
+        return hvm_read(seg, offset, p_data, bytes,
                         hvm_access_insn_fetch, sh_ctxt);
 
     /* Hit the cache. Simple memcpy. */
-    *val = 0;
-    memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
+    memcpy(p_data, &sh_ctxt->insn_buf[insn_off], bytes);
     return X86EMUL_OKAY;
 }
 
 static int
 hvm_emulate_write(enum x86_segment seg,
                   unsigned long offset,
-                  unsigned long val,
+                  void *p_data,
                   unsigned int bytes,
                   struct x86_emulate_ctxt *ctxt)
 {
@@ -241,7 +238,7 @@ hvm_emulate_write(enum x86_segment seg,
         return rc;
 
     return v->arch.paging.mode->shadow.x86_emulate_write(
-        v, addr, &val, bytes, sh_ctxt);
+        v, addr, p_data, bytes, sh_ctxt);
 }
 
 static int 
@@ -293,7 +290,7 @@ static int
 static int
 pv_emulate_read(enum x86_segment seg,
                 unsigned long offset,
-                unsigned long *val,
+                void *p_data,
                 unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt)
 {
@@ -302,8 +299,7 @@ pv_emulate_read(enum x86_segment seg,
     if ( !is_x86_user_segment(seg) )
         return X86EMUL_UNHANDLEABLE;
 
-    *val = 0;
-    if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
+    if ( (rc = copy_from_user(p_data, (void *)offset, bytes)) != 0 )
     {
         propagate_page_fault(offset + bytes - rc, 0); /* read fault */
         return X86EMUL_EXCEPTION;
@@ -315,7 +311,7 @@ static int
 static int
 pv_emulate_write(enum x86_segment seg,
                  unsigned long offset,
-                 unsigned long val,
+                 void *p_data,
                  unsigned int bytes,
                  struct x86_emulate_ctxt *ctxt)
 {
@@ -325,7 +321,7 @@ pv_emulate_write(enum x86_segment seg,
     if ( !is_x86_user_segment(seg) )
         return X86EMUL_UNHANDLEABLE;
     return v->arch.paging.mode->shadow.x86_emulate_write(
-        v, offset, &val, bytes, sh_ctxt);
+        v, offset, p_data, bytes, sh_ctxt);
 }
 
 static int 
diff -r 51b392ab1912 -r c33a40b4c22b xen/arch/x86/x86_emulate/x86_emulate.c
--- a/xen/arch/x86/x86_emulate/x86_emulate.c    Mon Jun 30 11:39:10 2008 +0100
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c    Mon Jun 30 14:19:09 2008 +0100
@@ -466,7 +466,7 @@ do{ asm volatile (                      
 
 /* Fetch next part of the instruction being emulated. */
 #define insn_fetch_bytes(_size)                                         \
-({ unsigned long _x, _eip = _regs.eip;                                  \
+({ unsigned long _x = 0, _eip = _regs.eip;                              \
    if ( !mode_64bit() ) _eip = (uint32_t)_eip; /* ignore upper dword */ \
    _regs.eip += (_size); /* real hardware doesn't truncate */           \
    generate_exception_if((uint8_t)(_regs.eip - ctxt->regs->eip) > 15,   \
@@ -655,6 +655,19 @@ static void __put_rep_prefix(
         __put_rep_prefix(&_regs, ctxt->regs, ad_bytes, reps_completed); \
 })
 
+/* Compatibility function: read guest memory, zero-extend result to a ulong. */
+static int read_ulong(
+        enum x86_segment seg,
+        unsigned long offset,
+        unsigned long *val,
+        unsigned int bytes,
+        struct x86_emulate_ctxt *ctxt,
+        struct x86_emulate_ops *ops)
+{
+    *val = 0;
+    return ops->read(seg, offset, val, bytes, ctxt);
+}
+
 /*
  * Unsigned multiplication with double-word result.
  * IN:  Multiplicand=m[0], Multiplier=m[1]
@@ -841,7 +854,8 @@ static int ioport_access_check(
          (tr.limit < 0x67) )
         goto raise_exception;
 
-    if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
+    if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66,
+                          &iobmp, 2, ctxt, ops)) )
         return rc;
 
     /* Ensure TSS includes two bytes including byte containing first port. */
@@ -849,7 +863,8 @@ static int ioport_access_check(
     if ( tr.limit <= iobmp )
         goto raise_exception;
 
-    if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
+    if ( (rc = read_ulong(x86_seg_none, tr.base + iobmp,
+                          &iobmp, 2, ctxt, ops)) )
         return rc;
     if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
         goto raise_exception;
@@ -941,12 +956,12 @@ protmode_load_seg(
         goto raise_exn;
 
     do {
-        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
-                             &val, 4, ctxt)) )
+        if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8),
+                              &val, 4, ctxt, ops)) )
             return rc;
         desc.a = val;
-        if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
-                             &val, 4, ctxt)) )
+        if ( (rc = read_ulong(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+                              &val, 4, ctxt, ops)) )
             return rc;
         desc.b = val;
 
@@ -1402,8 +1417,8 @@ x86_emulate(
             case 8: src.val = *(uint64_t *)src.reg; break;
             }
         }
-        else if ( (rc = ops->read(src.mem.seg, src.mem.off,
-                                  &src.val, src.bytes, ctxt)) )
+        else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
+                                   &src.val, src.bytes, ctxt, ops)) )
             goto done;
         break;
     case SrcImm:
@@ -1494,8 +1509,8 @@ x86_emulate(
         }
         else if ( !(d & Mov) ) /* optimisation - avoid slow emulated read */
         {
-            if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
-                                 &dst.val, dst.bytes, ctxt)) )
+            if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+                                  &dst.val, dst.bytes, ctxt, ops)) )
                 goto done;
             dst.orig_val = dst.val;
         }
@@ -1571,8 +1586,8 @@ x86_emulate(
         int lb, ub, idx;
         generate_exception_if(mode_64bit() || (src.type != OP_MEM),
                               EXC_UD, -1);
-        if ( (rc = ops->read(src.mem.seg, src.mem.off + op_bytes,
-                             &src_val2, op_bytes, ctxt)) )
+        if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
+                              &src_val2, op_bytes, ctxt, ops)) )
             goto done;
         ub  = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
         lb  = (op_bytes == 2) ? (int16_t)src.val  : (int32_t)src.val;
@@ -1588,8 +1603,8 @@ x86_emulate(
             /* movsxd */
             if ( src.type == OP_REG )
                 src.val = *(int32_t *)src.reg;
-            else if ( (rc = ops->read(src.mem.seg, src.mem.off,
-                                      &src.val, 4, ctxt)) )
+            else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
+                                       &src.val, 4, ctxt, ops)) )
                 goto done;
             dst.val = (int32_t)src.val;
         }
@@ -1613,8 +1628,8 @@ x86_emulate(
         unsigned long src1; /* ModR/M source operand */
         if ( ea.type == OP_REG )
             src1 = *ea.reg;
-        else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
-                                  &src1, op_bytes, ctxt)) )
+        else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+                                   &src1, op_bytes, ctxt, ops)) )
             goto done;
         _regs.eflags &= ~(EFLG_OF|EFLG_CF);
         switch ( dst.bytes )
@@ -1720,8 +1735,8 @@ x86_emulate(
         /* 64-bit mode: POP defaults to a 64-bit operand. */
         if ( mode_64bit() && (dst.bytes == 4) )
             dst.bytes = 8;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
-                             &dst.val, dst.bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
             goto done;
         break;
 
@@ -1773,8 +1788,8 @@ x86_emulate(
         dst.val = x86_seg_es;
     les: /* dst.val identifies the segment */
         generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
-        if ( (rc = ops->read(src.mem.seg, src.mem.off + src.bytes,
-                             &sel, 2, ctxt)) != 0 )
+        if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+                              &sel, 2, ctxt, ops)) != 0 )
             goto done;
         if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
             goto done;
@@ -2020,8 +2035,8 @@ x86_emulate(
                 dst.bytes = op_bytes = 8;
                 if ( dst.type == OP_REG )
                     dst.val = *dst.reg;
-                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
-                                          &dst.val, 8, ctxt)) != 0 )
+                else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+                                           &dst.val, 8, ctxt, ops)) != 0 )
                     goto done;
             }
             src.val = _regs.eip;
@@ -2036,8 +2051,8 @@ x86_emulate(
 
             generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
 
-            if ( (rc = ops->read(dst.mem.seg, dst.mem.off+dst.bytes,
-                                 &sel, 2, ctxt)) )
+            if ( (rc = read_ulong(dst.mem.seg, dst.mem.off+dst.bytes,
+                                  &sel, 2, ctxt, ops)) )
                 goto done;
 
             if ( (modrm_reg & 7) == 3 ) /* call */
@@ -2046,9 +2061,9 @@ x86_emulate(
                 fail_if(ops->read_segment == NULL);
                 if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
                      (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                                      reg.sel, op_bytes, ctxt)) ||
+                                      &reg.sel, op_bytes, ctxt)) ||
                      (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                                      _regs.eip, op_bytes, ctxt)) )
+                                      &_regs.eip, op_bytes, ctxt)) )
                     goto done;
             }
 
@@ -2066,12 +2081,12 @@ x86_emulate(
                 dst.bytes = 8;
                 if ( dst.type == OP_REG )
                     dst.val = *dst.reg;
-                else if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
-                                          &dst.val, 8, ctxt)) != 0 )
+                else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+                                           &dst.val, 8, ctxt, ops)) != 0 )
                     goto done;
             }
             if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
-                                  dst.val, dst.bytes, ctxt)) != 0 )
+                                  &dst.val, dst.bytes, ctxt)) != 0 )
                 goto done;
             dst.type = OP_NONE;
             break;
@@ -2106,7 +2121,7 @@ x86_emulate(
                 &dst.val, dst.bytes, ctxt);
         else
             rc = ops->write(
-                dst.mem.seg, dst.mem.off, dst.val, dst.bytes, ctxt);
+                dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, ctxt);
         if ( rc != 0 )
             goto done;
     default:
@@ -2153,7 +2168,7 @@ x86_emulate(
         if ( mode_64bit() && (op_bytes == 4) )
             op_bytes = 8;
         if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                              reg.sel, op_bytes, ctxt)) != 0 )
+                              &reg.sel, op_bytes, ctxt)) != 0 )
             goto done;
         break;
     }
@@ -2165,8 +2180,8 @@ x86_emulate(
         /* 64-bit mode: POP defaults to a 64-bit operand. */
         if ( mode_64bit() && (op_bytes == 4) )
             op_bytes = 8;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &dst.val, op_bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &dst.val, op_bytes, ctxt, ops)) != 0 )
             goto done;
         if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
             return rc;
@@ -2275,8 +2290,8 @@ x86_emulate(
         dst.bytes = op_bytes;
         if ( mode_64bit() && (dst.bytes == 4) )
             dst.bytes = 8;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
-                             &dst.val, dst.bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
             goto done;
         break;
 
@@ -2288,7 +2303,7 @@ x86_emulate(
         generate_exception_if(mode_64bit(), EXC_UD, -1);
         for ( i = 0; i < 8; i++ )
             if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                                  regs[i], op_bytes, ctxt)) != 0 )
+                                  &regs[i], op_bytes, ctxt)) != 0 )
             goto done;
         break;
     }
@@ -2303,8 +2318,8 @@ x86_emulate(
         generate_exception_if(mode_64bit(), EXC_UD, -1);
         for ( i = 0; i < 8; i++ )
         {
-            if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                                 &dst.val, op_bytes, ctxt)) != 0 )
+            if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                                  &dst.val, op_bytes, ctxt, ops)) != 0 )
                 goto done;
             switch ( op_bytes )
             {
@@ -2382,8 +2397,8 @@ x86_emulate(
         }
         else
         {
-            if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
-                                 &dst.val, dst.bytes, ctxt)) != 0 )
+            if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+                                  &dst.val, dst.bytes, ctxt, ops)) != 0 )
                 goto done;
             fail_if(ops->write_io == NULL);
             if ( (rc = ops->write_io(port, dst.bytes, dst.val, ctxt)) != 0 )
@@ -2455,9 +2470,9 @@ x86_emulate(
 
         if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
              (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                              reg.sel, op_bytes, ctxt)) ||
+                              &reg.sel, op_bytes, ctxt)) ||
              (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
-                              _regs.eip, op_bytes, ctxt)) )
+                              &_regs.eip, op_bytes, ctxt)) )
             goto done;
 
         if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
@@ -2483,8 +2498,8 @@ x86_emulate(
         /* 64-bit mode: POP defaults to a 64-bit operand. */
         if ( mode_64bit() && (op_bytes == 4) )
             op_bytes = 8;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &dst.val, op_bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &dst.val, op_bytes, ctxt, ops)) != 0 )
             goto done;
         if ( op_bytes == 2 )
             dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u);
@@ -2507,8 +2522,8 @@ x86_emulate(
         dst.type  = OP_REG;
         dst.reg   = (unsigned long *)&_regs.eax;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-        if ( (rc = ops->read(ea.mem.seg, insn_fetch_bytes(ad_bytes),
-                             &dst.val, dst.bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(ea.mem.seg, insn_fetch_bytes(ad_bytes),
+                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
             goto done;
         break;
 
@@ -2536,8 +2551,8 @@ x86_emulate(
         }
         else
         {
-            if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
-                                 &dst.val, dst.bytes, ctxt)) != 0 )
+            if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+                                  &dst.val, dst.bytes, ctxt, ops)) != 0 )
                 goto done;
             dst.type = OP_MEM;
             nr_reps = 1;
@@ -2556,10 +2571,10 @@ x86_emulate(
         unsigned long next_eip = _regs.eip;
         get_rep_prefix();
         src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
-        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
-                             &dst.val, dst.bytes, ctxt)) ||
-             (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
-                             &src.val, src.bytes, ctxt)) )
+        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+                              &dst.val, dst.bytes, ctxt, ops)) ||
+             (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
+                              &src.val, src.bytes, ctxt, ops)) )
             goto done;
         register_address_increment(
             _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
@@ -2592,8 +2607,8 @@ x86_emulate(
         dst.type  = OP_REG;
         dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.reg   = (unsigned long *)&_regs.eax;
-        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.esi),
-                             &dst.val, dst.bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.esi),
+                              &dst.val, dst.bytes, ctxt, ops)) != 0 )
             goto done;
         register_address_increment(
             _regs.esi, (_regs.eflags & EFLG_DF) ? -dst.bytes : dst.bytes);
@@ -2606,8 +2621,8 @@ x86_emulate(
         get_rep_prefix();
         src.bytes = dst.bytes = (d & ByteOp) ? 1 : op_bytes;
         dst.val = _regs.eax;
-        if ( (rc = ops->read(x86_seg_es, truncate_ea(_regs.edi),
-                             &src.val, src.bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_es, truncate_ea(_regs.edi),
+                              &src.val, src.bytes, ctxt, ops)) != 0 )
             goto done;
         register_address_increment(
             _regs.edi, (_regs.eflags & EFLG_DF) ? -src.bytes : src.bytes);
@@ -2624,8 +2639,8 @@ x86_emulate(
     case 0xc3: /* ret (near) */ {
         int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
         op_bytes = mode_64bit() ? 8 : op_bytes;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
-                             &dst.val, op_bytes, ctxt)) != 0 )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+                              &dst.val, op_bytes, ctxt, ops)) != 0 )
             goto done;
         _regs.eip = dst.val;
         break;
@@ -2640,7 +2655,7 @@ x86_emulate(
         dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
         dst.reg = (unsigned long *)&_regs.ebp;
         if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
-                              _regs.ebp, dst.bytes, ctxt)) )
+                              &_regs.ebp, dst.bytes, ctxt)) )
             goto done;
         dst.val = _regs.esp;
 
@@ -2650,14 +2665,14 @@ x86_emulate(
             {
                 unsigned long ebp, temp_data;
                 ebp = truncate_word(_regs.ebp - i*dst.bytes, ctxt->sp_size/8);
-                if ( (rc = ops->read(x86_seg_ss, ebp,
-                                     &temp_data, dst.bytes, ctxt)) ||
+                if ( (rc = read_ulong(x86_seg_ss, ebp,
+                                      &temp_data, dst.bytes, ctxt, ops)) ||
                      (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
-                                      temp_data, dst.bytes, ctxt)) )
+                                      &temp_data, dst.bytes, ctxt)) )
                     goto done;
             }
             if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
-                                  dst.val, dst.bytes, ctxt)) )
+                                  &dst.val, dst.bytes, ctxt)) )
                 goto done;
         }
 
@@ -2683,8 +2698,8 @@ x86_emulate(
 
         /* Second writeback, to %%ebp. */
         dst.reg = (unsigned long *)&_regs.ebp;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(dst.bytes),
-                             &dst.val, dst.bytes, ctxt)) )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+                              &dst.val, dst.bytes, ctxt, ops)) )
             goto done;
         break;
 
@@ -2692,10 +2707,10 @@ x86_emulate(
     case 0xcb: /* ret (far) */ {
         int offset = (b == 0xca) ? insn_fetch_type(uint16_t) : 0;
         op_bytes = mode_64bit() ? 8 : op_bytes;
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &dst.val, op_bytes, ctxt)) || 
-             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes + offset),
-                             &src.val, op_bytes, ctxt)) ||
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &dst.val, op_bytes, ctxt, ops)) || 
+             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+                              &src.val, op_bytes, ctxt, ops)) ||
              (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
             goto done;
         _regs.eip = dst.val;
@@ -2729,12 +2744,12 @@ x86_emulate(
         if ( !mode_iopl() )
             mask |= EFLG_IF;
         fail_if(!in_realmode(ctxt, ops));
-        if ( (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &eip, op_bytes, ctxt)) ||
-             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &cs, op_bytes, ctxt)) ||
-             (rc = ops->read(x86_seg_ss, sp_post_inc(op_bytes),
-                             &eflags, op_bytes, ctxt)) )
+        if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &eip, op_bytes, ctxt, ops)) ||
+             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &cs, op_bytes, ctxt, ops)) ||
+             (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+                              &eflags, op_bytes, ctxt, ops)) )
             goto done;
         if ( op_bytes == 2 )
             eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u);
@@ -2779,8 +2794,8 @@ x86_emulate(
 
     case 0xd7: /* xlat */ {
         unsigned long al = (uint8_t)_regs.eax;
-        if ( (rc = ops->read(ea.mem.seg, truncate_ea(_regs.ebx + al),
-                             &al, 1, ctxt)) != 0 )
+        if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.ebx + al),
+                              &al, 1, ctxt, ops)) != 0 )
             goto done;
         *(uint8_t *)&_regs.eax = al;
         break;
@@ -3242,9 +3257,9 @@ x86_emulate(
             if ( op_bytes == 2 )
                 reg.base &= 0xffffff;
             if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0,
-                                  reg.limit, 2, ctxt)) ||
+                                  &reg.limit, 2, ctxt)) ||
                  (rc = ops->write(ea.mem.seg, ea.mem.off+2,
-                                  reg.base, mode_64bit() ? 8 : 4, ctxt)) )
+                                  &reg.base, mode_64bit() ? 8 : 4, ctxt)) )
                 goto done;
             break;
         case 2: /* lgdt */
@@ -3252,10 +3267,10 @@ x86_emulate(
             generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
             fail_if(ops->write_segment == NULL);
             memset(&reg, 0, sizeof(reg));
-            if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0,
-                                 &limit, 2, ctxt)) ||
-                 (rc = ops->read(ea.mem.seg, ea.mem.off+2,
-                                 &base, mode_64bit() ? 8 : 4, ctxt)) )
+            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+                                  &limit, 2, ctxt, ops)) ||
+                 (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
+                                  &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
                 goto done;
             reg.base = base;
             reg.limit = limit;
@@ -3282,8 +3297,8 @@ x86_emulate(
                 goto done;
             if ( ea.type == OP_REG )
                 cr0w = *ea.reg;
-            else if ( (rc = ops->read(ea.mem.seg, ea.mem.off,
-                                      &cr0w, 2, ctxt)) )
+            else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+                                       &cr0w, 2, ctxt, ops)) )
                 goto done;
             /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
             cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
@@ -3405,8 +3420,10 @@ x86_emulate(
         if ( ea.type == OP_MEM )
         {
             unsigned long lval, hval;
-            if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
-                 (rc = ops->read(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
+            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+                                  &lval, 4, ctxt, ops)) ||
+                 (rc = read_ulong(ea.mem.seg, ea.mem.off+4,
+                                  &hval, 4, ctxt, ops)) )
                 goto done;
             val = ((uint64_t)hval << 32) | (uint32_t)lval;
             stub[2] = modrm & 0x38; /* movq (%eax),%mmN */
@@ -3429,8 +3446,8 @@ x86_emulate(
         if ( ea.type == OP_MEM )
         {
             unsigned long lval = (uint32_t)val, hval = (uint32_t)(val >> 32);
-            if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, lval, 4, ctxt)) ||
-                 (rc = ops->write(ea.mem.seg, ea.mem.off+4, hval, 4, ctxt)) )
+            if ( (rc = ops->write(ea.mem.seg, ea.mem.off+0, &lval, 4, ctxt)) ||
+                 (rc = ops->write(ea.mem.seg, ea.mem.off+4, &hval, 4, ctxt)) )
                 goto done;
         }
         break;
@@ -3482,8 +3499,8 @@ x86_emulate(
 
         /* Get actual old value. */
         for ( i = 0; i < (op_bytes/sizeof(long)); i++ )
-            if ( (rc = ops->read(ea.mem.seg, ea.mem.off + i*sizeof(long),
-                                 &old[i], sizeof(long), ctxt)) != 0 )
+            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off + i*sizeof(long),
+                                  &old[i], sizeof(long), ctxt, ops)) != 0 )
                 goto done;
 
         /* Get expected and proposed values. */
diff -r 51b392ab1912 -r c33a40b4c22b xen/arch/x86/x86_emulate/x86_emulate.h
--- a/xen/arch/x86/x86_emulate/x86_emulate.h    Mon Jun 30 11:39:10 2008 +0100
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h    Mon Jun 30 14:19:09 2008 +0100
@@ -102,7 +102,8 @@ enum x86_emulate_fpu_type {
 };
 
 /*
- * These operations represent the instruction emulator's interface to memory.
+ * These operations represent the instruction emulator's interface to memory,
+ * I/O ports, privileged state... pretty much everything other than GPRs.
  * 
  * NOTES:
  *  1. If the access fails (cannot emulate, or a standard access faults) then
@@ -110,8 +111,7 @@ enum x86_emulate_fpu_type {
  *     some out-of-band mechanism, unknown to the emulator. The memop signals
  *     failure by returning X86EMUL_EXCEPTION to the emulator, which will
  *     then immediately bail.
- *  2. Valid access sizes are 1, 2, 4 and 8 (x86/64 only) bytes.
- *  3. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+ *  2. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
  */
 struct x86_emulate_ops
 {
@@ -121,19 +121,25 @@ struct x86_emulate_ops
      * All memory-access functions:
      *  @seg:   [IN ] Segment being dereferenced (specified as x86_seg_??).
      *  @offset:[IN ] Offset within segment.
+     *  @p_data:[IN ] Pointer to i/o data buffer (length is @bytes)
      * Read functions:
      *  @val:   [OUT] Value read, zero-extended to 'ulong'.
      * Write functions:
      *  @val:   [IN ] Value to write (low-order bytes used as req'd).
      * Variable-length access functions:
-     *  @bytes: [IN ] Number of bytes to read or write.
-     */
-
-    /* read: Emulate a memory read. */
+     *  @bytes: [IN ] Number of bytes to read or write. Valid access sizes are
+     *                1, 2, 4 and 8 (x86/64 only) bytes, unless otherwise
+     *                stated.
+     */
+
+    /*
+     * read: Emulate a memory read.
+     *  @bytes: Access length (0 < @bytes < 4096).
+     */
     int (*read)(
         enum x86_segment seg,
         unsigned long offset,
-        unsigned long *val,
+        void *p_data,
         unsigned int bytes,
         struct x86_emulate_ctxt *ctxt);
 
@@ -144,15 +150,18 @@ struct x86_emulate_ops
     int (*insn_fetch)(
         enum x86_segment seg,
         unsigned long offset,
-        unsigned long *val,
-        unsigned int bytes,
-        struct x86_emulate_ctxt *ctxt);
-
-    /* write: Emulate a memory write. */
+        void *p_data,
+        unsigned int bytes,
+        struct x86_emulate_ctxt *ctxt);
+
+    /*
+     * write: Emulate a memory write.
+     *  @bytes: Access length (0 < @bytes < 4096).
+     */
     int (*write)(
         enum x86_segment seg,
         unsigned long offset,
-        unsigned long val,
+        void *p_data,
         unsigned int bytes,
         struct x86_emulate_ctxt *ctxt);
 

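Again purely as a sketch: wiring hooks like the ones above into a struct
x86_emulate_ops table.  The field names (.read, .write, .insn_fetch) are the
ones whose signatures this changeset updates in x86_emulate.h; flat_ops,
flat_read and flat_write are assumed names from the earlier sketch, and
reusing the read hook for instruction fetches is only sensible for a flat
test address space.  Unused callbacks can stay NULL -- x86_emulate() guards
optional hooks with fail_if() checks such as fail_if(ops->write_io == NULL).

static struct x86_emulate_ops flat_ops = {
    .read       = flat_read,
    .write      = flat_write,
    .insn_fetch = flat_read,   /* flat address space: a fetch is just a read */
};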