
[Xen-changelog] [xen-unstable] PoD memory 2/9: calls to gfn_to_mfn_query()



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1231152159 0
# Node ID 629f028d22f9885cee83a7eac23ff0f78155bc38
# Parent  0cd1ba8bd7cd4b91719503f102bc4f076aeb40c4
PoD memory 2/9: calls to gfn_to_mfn_query()

Make shadow code, and other important places, call gfn_to_mfn_query().
In particular, any place that holds the shadow lock must use the query
variant, so that the lookup never tries to populate a
populate-on-demand entry while the lock is held (which could
deadlock).
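
For illustration only (not part of this changeset): a minimal sketch,
in the style of the emulate_gva_to_mfn() hunk below, of the calling
convention this patch establishes.  The helper name
choose_translation() is made up for the example; gfn_to_mfn_query(),
gfn_to_mfn() and shadow_locked_by_me() are used as in the patch.

    /* Sketch only: pick the translation variant depending on whether
     * the shadow lock is held, mirroring emulate_gva_to_mfn() below. */
    static mfn_t choose_translation(struct vcpu *v, unsigned long gfn,
                                    p2m_type_t *p2mt)
    {
        if ( shadow_locked_by_me(v->domain) )
            /* Query only: a populate-on-demand entry stays
             * unpopulated, so nothing is allocated under the lock. */
            return gfn_to_mfn_query(v->domain, _gfn(gfn), p2mt);

        /* Lock not held: a full lookup may demand-populate. */
        return gfn_to_mfn(v->domain, _gfn(gfn), p2mt);
    }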

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c     |    2 +-
 xen/arch/x86/hvm/vmx/vmx.c     |   23 +++++++++++++----------
 xen/arch/x86/mm/p2m.c          |   10 +++++-----
 xen/arch/x86/mm/shadow/multi.c |   35 ++++++++++++++++++++---------------
 xen/arch/x86/mm/shadow/types.h |    6 ++++++
 5 files changed, 45 insertions(+), 31 deletions(-)

diff -r 0cd1ba8bd7cd -r 629f028d22f9 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Jan 05 10:42:39 2009 +0000
@@ -888,7 +888,7 @@ static void svm_do_nested_pgfault(paddr_
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
      * to the mmio handler.
      */
-    mfn = gfn_to_mfn_current(gfn, &p2mt);
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
     if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
     {
         if ( !handle_mmio() )
diff -r 0cd1ba8bd7cd -r 629f028d22f9 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Jan 05 10:42:39 2009 +0000
@@ -2124,9 +2124,9 @@ static void ept_handle_violation(unsigne
     mfn_t mfn;
     p2m_type_t t;
 
-    mfn = gfn_to_mfn(d, gfn, &t);
-
-    /* There are two legitimate reasons for taking an EPT violation. 
+    mfn = gfn_to_mfn_guest(d, gfn, &t);
+
+    /* There are three legitimate reasons for taking an EPT violation. 
      * One is a guest access to MMIO space. */
     if ( gla_validity == EPT_GLA_VALIDITY_MATCH && p2m_is_mmio(t) )
     {
@@ -2134,15 +2134,18 @@ static void ept_handle_violation(unsigne
         return;
     }
 
-    /* The other is log-dirty mode, writing to a read-only page */
-    if ( paging_mode_log_dirty(d)
-         && (gla_validity == EPT_GLA_VALIDITY_MATCH
-             || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
+    /* The second is log-dirty mode, writing to a read-only page;
+     * The third is populating a populate-on-demand page. */
+    if ( (gla_validity == EPT_GLA_VALIDITY_MATCH
+          || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
          && p2m_is_ram(t) && (t != p2m_ram_ro) )
     {
-        paging_mark_dirty(d, mfn_x(mfn));
-        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+        if ( paging_mode_log_dirty(d) )
+        {
+            paging_mark_dirty(d, mfn_x(mfn));
+            p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
+            flush_tlb_mask(d->domain_dirty_cpumask);
+        }
         return;
     }
 
diff -r 0cd1ba8bd7cd -r 629f028d22f9 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/p2m.c     Mon Jan 05 10:42:39 2009 +0000
@@ -732,7 +732,7 @@ static void audit_p2m(struct domain *d)
             continue;
         }
 
-        p2mfn = gfn_to_mfn_foreign(d, gfn, &type);
+        p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
         if ( mfn_x(p2mfn) != mfn )
         {
             mpbad++;
@@ -750,7 +750,7 @@ static void audit_p2m(struct domain *d)
 
         if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
         {
-            lp2mfn = mfn_x(gfn_to_mfn(d, gfn, &type));
+            lp2mfn = mfn_x(gfn_to_mfn_query(d, gfn, &type));
             if ( lp2mfn != mfn_x(p2mfn) )
             {
                 P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
@@ -960,7 +960,7 @@ guest_physmap_add_entry(struct domain *d
     /* First, remove m->p mappings for existing p->m mappings */
     for ( i = 0; i < (1UL << page_order); i++ )
     {
-        omfn = gfn_to_mfn(d, gfn + i, &ot);
+        omfn = gfn_to_mfn_query(d, gfn + i, &ot);
         if ( p2m_is_ram(ot) )
         {
             ASSERT(mfn_valid(omfn));
@@ -985,7 +985,7 @@ guest_physmap_add_entry(struct domain *d
              * address */
             P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                       mfn + i, ogfn, gfn + i);
-            omfn = gfn_to_mfn(d, ogfn, &ot);
+            omfn = gfn_to_mfn_query(d, ogfn, &ot);
             if ( p2m_is_ram(ot) )
             {
                 ASSERT(mfn_valid(omfn));
@@ -1154,7 +1154,7 @@ set_mmio_p2m_entry(struct domain *d, uns
     if ( !paging_mode_translate(d) )
         return 0;
 
-    omfn = gfn_to_mfn(d, gfn, &ot);
+    omfn = gfn_to_mfn_query(d, gfn, &ot);
     if ( p2m_is_ram(ot) )
     {
         ASSERT(mfn_valid(omfn));
diff -r 0cd1ba8bd7cd -r 629f028d22f9 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Mon Jan 05 10:42:39 2009 +0000
@@ -2170,7 +2170,7 @@ static int validate_gl4e(struct vcpu *v,
     if ( guest_l4e_get_flags(new_gl4e) & _PAGE_PRESENT )
     {
         gfn_t gl3gfn = guest_l4e_get_gfn(new_gl4e);
-        mfn_t gl3mfn = gfn_to_mfn(d, gl3gfn, &p2mt);
+        mfn_t gl3mfn = gfn_to_mfn_query(d, gl3gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(v, gl3mfn, SH_type_l3_shadow);
         else
@@ -2227,7 +2227,7 @@ static int validate_gl3e(struct vcpu *v,
     if ( guest_l3e_get_flags(new_gl3e) & _PAGE_PRESENT )
     {
         gfn_t gl2gfn = guest_l3e_get_gfn(new_gl3e);
-        mfn_t gl2mfn = gfn_to_mfn(v->domain, gl2gfn, &p2mt);
+        mfn_t gl2mfn = gfn_to_mfn_query(v->domain, gl2gfn, &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(v, gl2mfn, SH_type_l2_shadow);
         else
@@ -2276,7 +2276,7 @@ static int validate_gl2e(struct vcpu *v,
         }
         else
         {
-            mfn_t gl1mfn = gfn_to_mfn(v->domain, gl1gfn, &p2mt);
+            mfn_t gl1mfn = gfn_to_mfn_query(v->domain, gl1gfn, &p2mt);
             if ( p2m_is_ram(p2mt) )
                 sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
             else
@@ -2346,7 +2346,7 @@ static int validate_gl1e(struct vcpu *v,
     perfc_incr(shadow_validate_gl1e_calls);
 
     gfn = guest_l1e_get_gfn(new_gl1e);
-    gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+    gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
     l1e_propagate_from_guest(v, new_gl1e, gmfn, &new_sl1e, ft_prefetch, p2mt);
     result |= shadow_set_l1e(v, sl1p, new_sl1e, sl1mfn);
@@ -2406,7 +2406,7 @@ void sh_resync_l1(struct vcpu *v, mfn_t 
             shadow_l1e_t nsl1e;
 
             gfn = guest_l1e_get_gfn(gl1e);
-            gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+            gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
             l1e_propagate_from_guest(v, gl1e, gmfn, &nsl1e, ft_prefetch, p2mt);
             rc |= shadow_set_l1e(v, sl1p, nsl1e, sl1mfn);
             
@@ -2723,7 +2723,7 @@ static void sh_prefetch(struct vcpu *v, 
 
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
-        gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+        gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
 
         /* Propagate the entry.  */
         l1e_propagate_from_guest(v, gl1e, gmfn, &sl1e, ft_prefetch, p2mt);
@@ -3079,7 +3079,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.l1e);
-    gmfn = gfn_to_mfn(d, gfn, &p2mt);
+    gmfn = gfn_to_mfn_guest(d, gfn, &p2mt);
 
     if ( shadow_mode_refcounts(d) && 
          (!p2m_is_valid(p2mt) || (!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
@@ -4119,7 +4119,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
             }
@@ -4132,7 +4132,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
             if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
             {
                 gl2gfn = guest_l3e_get_gfn(gl3e[i]);
-                gl2mfn = gfn_to_mfn(d, gl2gfn, &p2mt);
+                gl2mfn = gfn_to_mfn_query(d, gl2gfn, &p2mt);
                 if ( p2m_is_ram(p2mt) )
                     sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3) 
                                            ? SH_type_l2h_shadow 
@@ -4518,7 +4518,12 @@ static mfn_t emulate_gva_to_mfn(struct v
     }
 
     /* Translate the GFN to an MFN */
-    mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+    /* PoD: query only if shadow lock is held (to avoid deadlock) */
+    if ( shadow_locked_by_me(v->domain) )
+        mfn = gfn_to_mfn_query(v->domain, _gfn(gfn), &p2mt);
+    else
+        mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
+        
     if ( p2mt == p2m_ram_ro )
         return _mfn(READONLY_GFN);
     if ( !p2m_is_ram(p2mt) )
@@ -4922,7 +4927,7 @@ int sh_audit_l1_table(struct vcpu *v, mf
             {
                 gfn = guest_l1e_get_gfn(*gl1e);
                 mfn = shadow_l1e_get_mfn(*sl1e);
-                gmfn = gfn_to_mfn(v->domain, gfn, &p2mt);
+                gmfn = gfn_to_mfn_query(v->domain, gfn, &p2mt);
                 if ( mfn_x(gmfn) != mfn_x(mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -4989,7 +4994,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
             mfn = shadow_l2e_get_mfn(*sl2e);
             gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)  
                 ? get_fl1_shadow_status(v, gfn)
-                : get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt), 
+                : get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
                                     SH_type_l1_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
@@ -4997,7 +5002,7 @@ int sh_audit_l2_table(struct vcpu *v, mf
                            " --> %" PRI_mfn " != mfn %" PRI_mfn,
                            gfn_x(gfn), 
                            (guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
-                           : mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt)),
+                           : mfn_x(gfn_to_mfn_query(v->domain, gfn, &p2mt)),
                            mfn_x(gmfn), mfn_x(mfn));
         }
     });
@@ -5036,7 +5041,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
         {
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
-            gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt), 
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
                                      ((GUEST_PAGING_LEVELS == 3 ||
                                        is_pv_32on64_vcpu(v))
                                       && !shadow_mode_external(v->domain)
@@ -5083,7 +5088,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
         {
             gfn = guest_l4e_get_gfn(*gl4e);
             mfn = shadow_l4e_get_mfn(*sl4e);
-            gmfn = get_shadow_status(v, gfn_to_mfn(v->domain, gfn, &p2mt), 
+            gmfn = get_shadow_status(v, gfn_to_mfn_query(v->domain, gfn, &p2mt), 
                                      SH_type_l3_shadow);
             if ( mfn_x(gmfn) != mfn_x(mfn) )
                 AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
diff -r 0cd1ba8bd7cd -r 629f028d22f9 xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Mon Jan 05 10:41:48 2009 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Mon Jan 05 10:42:39 2009 +0000
@@ -190,6 +190,12 @@ static inline shadow_l4e_t shadow_l4e_fr
       shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 #endif
+
+ /* Override gfn_to_mfn to work with gfn_t */
+#undef gfn_to_mfn_query
+#define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_query)
+#undef gfn_to_mfn_guest
+#define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_guest)
 
 /* The shadow types needed for the various levels. */
 
