
[Xen-changelog] [xen-unstable] x86 hvm: Factor out hvm_map_guest_frame_{rw,ro} from hvm_map_entry



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1286359219 -3600
# Node ID 02e199c96ece5d2853514a14ce7407fd8167ae47
# Parent  368957d8b063273d3bad38b122d15b5b8fda29ec
x86 hvm: Factor out hvm_map_guest_frame_{rw,ro} from hvm_map_entry

This allows pages to be mapped by guest physical frame number rather than
only by guest virtual address.

This will be used by nested virtualization (a usage sketch is appended
after the patch below).

Signed-off-by: Uwe Dannowski <Uwe.Dannowski@xxxxxxx>
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        |   99 +++++++++++++++++++++++++++---------------
 xen/include/asm-x86/hvm/hvm.h |    4 +
 2 files changed, 68 insertions(+), 35 deletions(-)

diff -r 368957d8b063 -r 02e199c96ece xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Oct 05 17:51:28 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Oct 06 11:00:19 2010 +0100
@@ -1356,55 +1356,84 @@ int hvm_virtual_to_linear_addr(
     return 0;
 }
 
+static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
+{
+    unsigned long mfn;
+    p2m_type_t p2mt;
+    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
+
+    mfn = mfn_x(writable
+                ? gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)
+                : gfn_to_mfn(p2m, gfn, &p2mt));
+    if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+        return NULL;
+    if ( p2m_is_paging(p2mt) )
+    {
+        p2m_mem_paging_populate(p2m, gfn);
+        return NULL;
+    }
+
+    ASSERT(mfn_valid(mfn));
+
+    if ( writable )
+        paging_mark_dirty(current->domain, mfn);
+
+    return map_domain_page(mfn);
+}
+
+void *hvm_map_guest_frame_rw(unsigned long gfn)
+{
+    return __hvm_map_guest_frame(gfn, 1);
+}
+
+void *hvm_map_guest_frame_ro(unsigned long gfn)
+{
+    return __hvm_map_guest_frame(gfn, 0);
+}
+
+void hvm_unmap_guest_frame(void *p)
+{
+    if ( p )
+        unmap_domain_page(p);
+}
+
 static void *hvm_map_entry(unsigned long va)
 {
-    unsigned long gfn, mfn;
-    p2m_type_t p2mt;
+    unsigned long gfn;
     uint32_t pfec;
-    struct vcpu *v = current;
-    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+    char *v;
 
     if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
     {
         gdprintk(XENLOG_ERR, "Descriptor table entry "
                  "straddles page boundary\n");
-        domain_crash(current->domain);
-        return NULL;
-    }
-
-    /* We're mapping on behalf of the segment-load logic, which might
-     * write the accessed flags in the descriptors (in 32-bit mode), but
-     * we still treat it as a kernel-mode read (i.e. no access checks). */
+        goto fail;
+    }
+
+    /*
+     * We're mapping on behalf of the segment-load logic, which might write
+     * the accessed flags in the descriptors (in 32-bit mode), but we still
+     * treat it as a kernel-mode read (i.e. no access checks).
+     */
     pfec = PFEC_page_present;
     gfn = paging_gva_to_gfn(current, va, &pfec);
-    if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
-        return NULL;
-    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
-    if ( p2m_is_paging(p2mt) )
-    {
-        p2m_mem_paging_populate(p2m, gfn);
-        return NULL;
-    }
-    if ( p2m_is_shared(p2mt) )
-        return NULL;
-    if ( !p2m_is_ram(p2mt) )
-    {
-        gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
-        domain_crash(current->domain);
-        return NULL;
-    }
-
-    ASSERT(mfn_valid(mfn));
-
-    paging_mark_dirty(current->domain, mfn);
-
-    return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
+    if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
+        goto fail;
+
+    v = hvm_map_guest_frame_rw(gfn);
+    if ( v == NULL )
+        goto fail;
+
+    return v + (va & ~PAGE_MASK);
+
+ fail:
+    domain_crash(current->domain);
+    return NULL;
 }
 
 static void hvm_unmap_entry(void *p)
 {
-    if ( p )
-        unmap_domain_page(p);
+    hvm_unmap_guest_frame(p);
 }
 
 static int hvm_load_segment_selector(
diff -r 368957d8b063 -r 02e199c96ece xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Oct 05 17:51:28 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Oct 06 11:00:19 2010 +0100
@@ -344,6 +344,10 @@ int hvm_virtual_to_linear_addr(
     unsigned int addr_size,
     unsigned long *linear_addr);
 
+void *hvm_map_guest_frame_rw(unsigned long gfn);
+void *hvm_map_guest_frame_ro(unsigned long gfn);
+void hvm_unmap_guest_frame(void *p);
+
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
     if ( hvm_funcs.set_info_guest )

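As a rough illustration of how the new helpers might be consumed (for
example by the nested-virtualization code the changeset mentions), here is
a minimal hypothetical sketch. It is not part of the changeset: the
function name nested_read_guest_page(), its parameters and its error
policy are invented for illustration; only hvm_map_guest_frame_ro() and
hvm_unmap_guest_frame() come from the patch above, and paddr_t,
PAGE_SHIFT, PAGE_MASK and PAGE_SIZE are assumed to be the usual Xen
definitions pulled in by the standard hypervisor headers.

/*
 * Hypothetical caller (illustration only, not from this changeset):
 * copy a small structure that the guest supplies by guest physical
 * address, e.g. part of a nested control block.
 */
static int nested_read_guest_page(paddr_t gpa, void *buf, size_t len)
{
    unsigned long gfn = gpa >> PAGE_SHIFT;
    unsigned long offset = gpa & ~PAGE_MASK;   /* offset within the frame */
    char *p;

    /* Keep the example simple: refuse reads that cross a page boundary. */
    if ( (offset + len) > PAGE_SIZE )
        return -EINVAL;

    /* Read-only mapping: no dirty-log marking is needed for a read. */
    p = hvm_map_guest_frame_ro(gfn);
    if ( p == NULL )
        return -EINVAL;   /* not RAM, or the frame is still being paged in */

    memcpy(buf, p + offset, len);

    /* hvm_unmap_guest_frame() tolerates NULL, so cleanup paths stay simple. */
    hvm_unmap_guest_frame(p);
    return 0;
}

Note the behaviour carried over from the old hvm_map_entry(): the writable
variant (hvm_map_guest_frame_rw()) refuses shared frames and marks the
frame dirty via paging_mark_dirty(), while both variants return NULL for
non-RAM frame types and for frames that are being paged in, after kicking
off p2m_mem_paging_populate().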
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
