
[Xen-changelog] [xen-unstable] hvm: For functions which translate virtual addresses to machine



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1198756830 0
# Node ID e818c24cec031a4509298708aff3abed94b278cc
# Parent  1e3e30670ce4449074afdbbea23dd7382e676eb4
hvm: For functions which translate virtual addresses to machine
addresses, page faults should only be raised when the gva->gfn
translation fails. These should be distinguished from gfn->mfn
translation failures.

The main effect of this is to change the behaviour of functions
derived from __hvm_copy(), which now returns a three-way enumeration
and can also automatically inject #PF when the gva->gfn translation
fails.
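
To illustrate the new calling convention, a converted caller looks
roughly like this (a minimal sketch, not part of the changeset; the
wrapper write_guest_dword() is hypothetical, while the HVMCOPY_* and
X86EMUL_* values are those used in the patch below):

    /* Hypothetical caller sketch -- not part of this changeset. */
    static int write_guest_dword(unsigned long vaddr, uint32_t val)
    {
        switch ( hvm_copy_to_guest_virt(vaddr, &val, sizeof(val)) )
        {
        case HVMCOPY_okay:
            return X86EMUL_OKAY;
        case HVMCOPY_bad_gva_to_gfn:
            /* A #PF has already been queued for injection into the
             * current vcpu; just report the exception. */
            return X86EMUL_EXCEPTION;
        case HVMCOPY_bad_gfn_to_mfn:
        default:
            /* The guest physical address did not map to ordinary RAM. */
            return X86EMUL_UNHANDLEABLE;
        }
    }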

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |   81 +++++++++++++++++++++++++++-----------
 xen/arch/x86/hvm/io.c             |   36 ++--------------
 xen/arch/x86/hvm/platform.c       |   27 ++++--------
 xen/arch/x86/hvm/svm/svm.c        |   20 +++------
 xen/arch/x86/hvm/vmx/realmode.c   |   27 +-----------
 xen/arch/x86/hvm/vmx/vmx.c        |   17 ++-----
 xen/arch/x86/mm/shadow/common.c   |   29 ++++++-------
 xen/arch/x86/mm/shadow/multi.c    |   36 ++++++++++------
 xen/include/asm-x86/hvm/support.h |   49 ++++++++++++++++++++--
 9 files changed, 170 insertions(+), 152 deletions(-)

diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Dec 27 12:00:30 2007 +0000
@@ -1251,7 +1251,7 @@ void hvm_task_switch(
         if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
                                         4, hvm_access_write, 32,
                                         &linear_addr) )
-            hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
+            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4);
     }
 
  out:
@@ -1269,24 +1269,26 @@ void hvm_task_switch(
  *  @fetch = copy is an instruction fetch?
  * Returns number of bytes failed to copy (0 == complete success).
  */
-static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, 
-                      int virt, int fetch)
-{
-    struct segment_register sreg;
+static enum hvm_copy_result __hvm_copy(
+    void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
+{
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
     int count, todo;
     uint32_t pfec = PFEC_page_present;
 
-    hvm_get_segment_register(current, x86_seg_ss, &sreg);
-
-    if ( dir ) 
-        pfec |= PFEC_write_access;
-    if ( sreg.attr.fields.dpl == 3 )
-        pfec |= PFEC_user_mode;
-    if ( fetch ) 
-        pfec |= PFEC_insn_fetch;
+    if ( virt )
+    {
+        struct segment_register sreg;
+        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
+            pfec |= PFEC_user_mode;
+        if ( dir ) 
+            pfec |= PFEC_write_access;
+        if ( fetch ) 
+            pfec |= PFEC_insn_fetch;
+    }
 
     todo = size;
     while ( todo > 0 )
@@ -1294,14 +1296,24 @@ static int __hvm_copy(void *buf, paddr_t
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
 
         if ( virt )
+        {
             gfn = paging_gva_to_gfn(current, addr, &pfec);
+            if ( gfn == INVALID_GFN )
+            {
+                if ( virt == 2 ) /* 2 means generate a fault */
+                    hvm_inject_exception(TRAP_page_fault, pfec, addr);
+                return HVMCOPY_bad_gva_to_gfn;
+            }
+        }
         else
+        {
             gfn = addr >> PAGE_SHIFT;
-        
+        }
+
         mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
 
         if ( !p2m_is_ram(p2mt) )
-            return todo;
+            return HVMCOPY_bad_gfn_to_mfn;
         ASSERT(mfn_valid(mfn));
 
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
@@ -1321,30 +1333,53 @@ static int __hvm_copy(void *buf, paddr_t
         todo -= count;
     }
 
-    return 0;
-}
-
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
+    return HVMCOPY_okay;
+}
+
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size)
 {
     return __hvm_copy(buf, paddr, size, 1, 0, 0);
 }
 
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size)
 {
     return __hvm_copy(buf, paddr, size, 0, 0, 0);
 }
 
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 1, 2, 0);
+}
+
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 0, 2, 0);
+}
+
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+    void *buf, unsigned long vaddr, int size)
+{
+    return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current));
+}
+
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+    unsigned long vaddr, void *buf, int size)
 {
     return __hvm_copy(buf, vaddr, size, 1, 1, 0);
 }
 
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size)
 {
     return __hvm_copy(buf, vaddr, size, 0, 1, 0);
 }
 
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size)
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size)
 {
     return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current));
 }
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/io.c     Thu Dec 27 12:00:30 2007 +0000
@@ -435,17 +435,8 @@ static void hvm_pio_assist(struct cpu_us
                 if ( hvm_paging_enabled(current) )
                 {
                     int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-                    if ( rv != 0 )
-                    {
-                        /* Failed on the page-spanning copy.  Inject PF into
-                         * the guest for the address where we failed. */
-                        addr += p->size - rv;
-                        gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
-                                 "of a page-spanning PIO: va=%#lx\n", addr);
-                        hvm_inject_exception(TRAP_page_fault,
-                                             PFEC_write_access, addr);
-                        return;
-                    }
+                    if ( rv == HVMCOPY_bad_gva_to_gfn )
+                        return; /* exception already injected */
                 }
                 else
                     (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -569,17 +560,8 @@ static void hvm_mmio_assist(struct cpu_u
             if (hvm_paging_enabled(current))
             {
                 int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-                if ( rv != 0 )
-                {
-                    /* Failed on the page-spanning copy.  Inject PF into
-                     * the guest for the address where we failed. */
-                    addr += p->size - rv;
-                    gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
-                             "a page-spanning MMIO: va=%#lx\n", addr);
-                    hvm_inject_exception(TRAP_page_fault,
-                                         PFEC_write_access, addr);
-                    return;
-                }
+                if ( rv == HVMCOPY_bad_gva_to_gfn )
+                    return; /* exception already injected */
             }
             else
                 (void)hvm_copy_to_guest_phys(addr, &p->data, p->size);
@@ -812,14 +794,8 @@ static void hvm_mmio_assist(struct cpu_u
         {
             unsigned long addr = mmio_opp->addr;
             int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
-            if ( rv != 0 )
-            {
-                addr += p->size - rv;
-                gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
-                         " va=%#lx\n", addr);
-                hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
-                return;
-            }
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
+                return; /* exception already injected */
         }
         break;
     }
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c       Thu Dec 27 12:00:30 2007 +0000
@@ -829,11 +829,12 @@ static int mmio_decode(int address_bytes
     }
 }
 
-int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
+int inst_copy_from_guest(
+    unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
     if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
         return 0;
-    if ( hvm_fetch_from_guest_virt(buf, guest_eip, inst_len) )
+    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) )
         return 0;
     return inst_len;
 }
@@ -1150,21 +1151,11 @@ void handle_mmio(paddr_t gpa)
                 if ( hvm_paging_enabled(v) )
                 {
                     int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                    if ( rv != 0 ) 
-                    {
-                        /* Failed on the page-spanning copy.  Inject PF into
-                         * the guest for the address where we failed */
-                        regs->eip -= inst_len; /* do not advance %eip */
-                        /* Must set CR2 at the failing address */ 
-                        addr += size - rv;
-                        gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
-                                 "page-spanning MMIO: va=%#lx\n", addr);
-                        hvm_inject_exception(TRAP_page_fault, 0, addr);
-                        return;
-                    }
+                    if ( rv == HVMCOPY_bad_gva_to_gfn ) 
+                        return; /* exception already injected */
                 }
                 else
-                    (void) hvm_copy_from_guest_phys(&value, addr, size);
+                    (void)hvm_copy_from_guest_phys(&value, addr, size);
             } else /* dir != IOREQ_WRITE */
                 /* Remember where to write the result, as a *VA*.
                  * Must be a VA so we can handle the page overlap 
@@ -1325,7 +1316,8 @@ unsigned long copy_to_user_hvm(void *to,
         return 0;
     }
 
-    return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
+    return hvm_copy_to_guest_virt_nofault(
+        (unsigned long)to, (void *)from, len);
 }
 
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
@@ -1336,7 +1328,8 @@ unsigned long copy_from_user_hvm(void *t
         return 0;
     }
 
-    return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
+    return hvm_copy_from_guest_virt_nofault(
+        to, (unsigned long)from, len);
 }
 
 /*
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 12:00:30 2007 +0000
@@ -1468,20 +1468,13 @@ static void svm_io_instruction(struct vc
                 if ( hvm_paging_enabled(current) )
                 {
                     int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                    if ( rv != 0 ) 
-                    {
-                        /* Failed on the page-spanning copy.  Inject PF into
-                         * the guest for the address where we failed. */
-                        addr += size - rv;
-                        gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
-                                 "of a page-spanning PIO: va=%#lx\n", addr);
-                        svm_inject_exception(TRAP_page_fault, 0, addr);
-                        return;
-                    }
+                    if ( rv == HVMCOPY_bad_gva_to_gfn )
+                        return; /* exception already injected */
                 }
                 else
-                    (void) hvm_copy_from_guest_phys(&value, addr, size);
-            } else /* dir != IOREQ_WRITE */
+                    (void)hvm_copy_from_guest_phys(&value, addr, size);
+            }
+            else /* dir != IOREQ_WRITE */
                 /* Remember where to write the result, as a *VA*.
                  * Must be a VA so we can handle the page overlap 
                  * correctly in hvm_pio_assist() */
@@ -1705,7 +1698,8 @@ static void svm_cr_access(
             offset = ( addr_size == 4 ) ? offset : ( offset & 0xFFFF );
             addr = hvm_get_segment_base(v, seg);
             addr += offset;
-            hvm_copy_to_guest_virt(addr,&value,2);
+            result = (hvm_copy_to_guest_virt(addr, &value, 2)
+                      != HVMCOPY_bad_gva_to_gfn);
         }
         else
         {
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c   Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/realmode.c   Thu Dec 27 12:00:30 2007 +0000
@@ -119,21 +119,12 @@ realmode_read(
     struct realmode_emulate_ctxt *rm_ctxt)
 {
     uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
-    int todo;
 
     *val = 0;
-    todo = hvm_copy_from_guest_phys(val, addr, bytes);
-
-    if ( todo )
+
+    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
     {
         struct vcpu *curr = current;
-
-        if ( todo != bytes )
-        {
-            gdprintk(XENLOG_WARNING, "RM: Partial read at %08x (%d/%d)\n",
-                     addr, todo, bytes);
-            return X86EMUL_UNHANDLEABLE;
-        }
 
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
@@ -203,20 +194,10 @@ realmode_emulate_write(
     struct realmode_emulate_ctxt *rm_ctxt =
         container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
     uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;
-    int todo;
-
-    todo = hvm_copy_to_guest_phys(addr, &val, bytes);
-
-    if ( todo )
+
+    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
     {
         struct vcpu *curr = current;
-
-        if ( todo != bytes )
-        {
-            gdprintk(XENLOG_WARNING, "RM: Partial write at %08x (%d/%d)\n",
-                     addr, todo, bytes);
-            return X86EMUL_UNHANDLEABLE;
-        }
 
         if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
             return X86EMUL_UNHANDLEABLE;
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 12:00:30 2007 +0000
@@ -1629,20 +1629,13 @@ static void vmx_send_str_pio(struct cpu_
             if ( hvm_paging_enabled(current) )
             {
                 int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                if ( rv != 0 )
-                {
-                    /* Failed on the page-spanning copy.  Inject PF into
-                     * the guest for the address where we failed. */
-                    addr += size - rv;
-                    gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
-                             "of a page-spanning PIO: va=%#lx\n", addr);
-                    vmx_inject_exception(TRAP_page_fault, 0, addr);
-                    return;
-                }
+                if ( rv == HVMCOPY_bad_gva_to_gfn )
+                    return; /* exception already injected */
             }
             else
-                (void) hvm_copy_from_guest_phys(&value, addr, size);
-        } else /* dir != IOREQ_WRITE */
+                (void)hvm_copy_from_guest_phys(&value, addr, size);
+        }
+        else /* dir != IOREQ_WRITE */
             /* Remember where to write the result, as a *VA*.
              * Must be a VA so we can handle the page overlap
              * correctly in hvm_pio_assist() */
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 12:00:30 2007 +0000
@@ -141,9 +141,8 @@ hvm_read(enum x86_segment seg,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
 {
-    struct segment_register *sreg;
     unsigned long addr;
-    int rc, errcode;
+    int rc;
 
     rc = hvm_translate_linear_addr(
         seg, offset, bytes, access_type, sh_ctxt, &addr);
@@ -157,19 +156,17 @@ hvm_read(enum x86_segment seg,
     else
         rc = hvm_copy_from_guest_virt(val, addr, bytes);
 
-    if ( rc == 0 ) 
+    switch ( rc )
+    {
+    case HVMCOPY_okay:
         return X86EMUL_OKAY;
-
-    /* If we got here, there was nothing mapped here, or a bad GFN 
-     * was mapped here.  This should never happen: we're here because
-     * of a write fault at the end of the instruction we're emulating. */ 
-    SHADOW_PRINTK("read failed to va %#lx\n", addr);
-    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
-    errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
-    if ( access_type == hvm_access_insn_fetch )
-        errcode |= PFEC_insn_fetch;
-    hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
-    return X86EMUL_EXCEPTION;
+    case HVMCOPY_bad_gva_to_gfn:
+        return X86EMUL_EXCEPTION;
+    default:
+        break;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
 }
 
 static int
@@ -399,7 +396,7 @@ struct x86_emulate_ops *shadow_init_emul
         (!hvm_translate_linear_addr(
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_virt(
+         !hvm_fetch_from_guest_virt_nofault(
              sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
@@ -427,7 +424,7 @@ void shadow_continue_emulation(struct sh
                 (!hvm_translate_linear_addr(
                     x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
                     hvm_access_insn_fetch, sh_ctxt, &addr) &&
-                 !hvm_fetch_from_guest_virt(
+                 !hvm_fetch_from_guest_virt_nofault(
                      sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
                 ? sizeof(sh_ctxt->insn_buf) : 0;
             sh_ctxt->insn_buf_eip = regs->eip;
diff -r 1e3e30670ce4 -r e818c24cec03 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 12:00:30 2007 +0000
@@ -3984,6 +3984,8 @@ int sh_remove_l3_shadow(struct vcpu *v, 
 /* Handling HVM guest writes to pagetables  */
 
 /* Translate a VA to an MFN, injecting a page-fault if we fail */
+#define BAD_GVA_TO_GFN (~0UL)
+#define BAD_GFN_TO_MFN (~1UL)
 static mfn_t emulate_gva_to_mfn(struct vcpu *v,
                                 unsigned long vaddr,
                                 struct sh_emulate_ctxt *sh_ctxt)
@@ -4001,7 +4003,7 @@ static mfn_t emulate_gva_to_mfn(struct v
             hvm_inject_exception(TRAP_page_fault, pfec, vaddr);
         else
             propagate_page_fault(vaddr, pfec);
-        return _mfn(INVALID_MFN);
+        return _mfn(BAD_GVA_TO_GFN);
     }
 
     /* Translate the GFN to an MFN */
@@ -4013,11 +4015,14 @@ static mfn_t emulate_gva_to_mfn(struct v
         return mfn;
     }
  
-    return _mfn(INVALID_MFN);
+    return _mfn(BAD_GFN_TO_MFN);
 }
 
 /* Check that the user is allowed to perform this write. 
  * Returns a mapped pointer to write to, or NULL for error. */
+#define MAPPING_UNHANDLEABLE ((void *)0)
+#define MAPPING_EXCEPTION    ((void *)1)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
 static void *emulate_map_dest(struct vcpu *v,
                               unsigned long vaddr,
                               u32 bytes,
@@ -4030,11 +4035,12 @@ static void *emulate_map_dest(struct vcp
     /* We don't emulate user-mode writes to page tables */
     sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
     if ( sreg->attr.fields.dpl == 3 )
-        return NULL;
+        return MAPPING_UNHANDLEABLE;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
     if ( !mfn_valid(sh_ctxt->mfn1) ) 
-        return NULL;
+        return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
+                MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
 
     /* Unaligned writes mean probably this isn't a pagetable */
     if ( vaddr & (bytes - 1) )
@@ -4051,13 +4057,14 @@ static void *emulate_map_dest(struct vcp
         /* Cross-page emulated writes are only supported for HVM guests; 
          * PV guests ought to know better */
         if ( !is_hvm_vcpu(v) )
-            return NULL;
+            return MAPPING_UNHANDLEABLE;
 
         /* This write crosses a page boundary.  Translate the second page */
         sh_ctxt->mfn2 = emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK,
                                            sh_ctxt);
         if ( !mfn_valid(sh_ctxt->mfn2) ) 
-            return NULL;
+            return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
+                    MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
 
         /* Cross-page writes mean probably not a pagetable */
         sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
@@ -4075,7 +4082,7 @@ static void *emulate_map_dest(struct vcp
         flush_tlb_local();
         map += (vaddr & ~PAGE_MASK);
     }
-    
+
 #if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
     /* Remember if the bottom bit was clear, so we can choose not to run
      * the change through the verify code if it's still clear afterwards */
@@ -4172,10 +4179,11 @@ sh_x86_emulate_write(struct vcpu *v, uns
 
     shadow_lock(v->domain);
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
     {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     memcpy(addr, src, bytes);
@@ -4202,10 +4210,11 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     shadow_lock(v->domain);
 
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
     {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     switch ( bytes )
@@ -4249,10 +4258,11 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     shadow_lock(v->domain);
 
     addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
-    if ( addr == NULL )
+    if ( emulate_map_dest_failed(addr) )
     {
         shadow_unlock(v->domain);
-        return X86EMUL_EXCEPTION;
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
     }
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
diff -r 1e3e30670ce4 -r e818c24cec03 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Dec 27 10:41:43 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h Thu Dec 27 12:00:30 2007 +0000
@@ -82,11 +82,50 @@ extern char hvm_io_bitmap[];
 
 void hvm_enable(struct hvm_function_table *);
 
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
-int hvm_fetch_from_guest_virt(void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result {
+    HVMCOPY_okay = 0,
+    HVMCOPY_bad_gva_to_gfn,
+    HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ *  HVMCOPY_okay: Copy was entirely successful.
+ *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ *                          ordinary machine memory.
+ *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ *                          mapping to a guest physical address. In this case
+ *                          a page fault exception is automatically queued
+ *                          for injection into the current HVM VCPU.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt(
+    void *buf, unsigned long vaddr, int size);
+
+/*
+ * As above (copy to/from a guest virtual address), but no fault is generated
+ * when HVMCOPY_bad_gva_to_gfn is returned.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
+    unsigned long vaddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
+    void *buf, unsigned long vaddr, int size);
 
 void hvm_print_line(struct vcpu *v, const char c);
 void hlt_timer_fn(void *data);
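
For contrast, the *_nofault variants declared above never inject #PF;
a caller wanting plain success/failure semantics might wrap them like
this (a hypothetical sketch modelled on copy_to_user_hvm() in the
platform.c hunk, not part of the changeset):

    /* Hypothetical sketch -- not part of this changeset. */
    static unsigned long put_guest_bytes(void *to, const void *from,
                                         unsigned int len)
    {
        /* No exception is injected on failure; the caller decides
         * how to recover from an incomplete copy. */
        if ( hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                            len) != HVMCOPY_okay )
            return len;
        return 0;
    }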
