[Xen-changelog] [xen-unstable] x86/mm: Refactor p2m get_entry accessor



# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1320923555 0
# Node ID d0bbe0622d1d8db2d11b722322287945d16d5982
# Parent  3e726d259039e31eed07ff0ad63b77ab419125ac
x86/mm: Refactor p2m get_entry accessor

Move the main p2m query accessor out of its inline definition in the
header and into the p2m code itself. This will allow p2m-internal
locking to be added to the accessor later.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
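
The point of taking the accessor out of line is that a later patch can
wrap the lookup in the p2m lock without exposing locking details to
every user of the header. A minimal sketch of what such a follow-up
could look like, reusing the p2m_lock()/p2m_unlock() helpers already
used elsewhere in p2m.c; the lock placement here is an assumption, not
part of this patch:

    mfn_t gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                        p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                        unsigned int *page_order)
    {
        mfn_t mfn;

        /* ... untranslated-guest fast path, as in the hunk below ... */

        p2m_lock(p2m);    /* hypothetical: serialize against p2m updates */
        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
        p2m_unlock(p2m);

        /* ... unshare and broken-entry handling, as in the hunk below ... */

        return mfn;
    }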


diff -r 3e726d259039 -r d0bbe0622d1d xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Thu Nov 10 11:12:35 2011 +0000
@@ -144,6 +144,44 @@
     p2m_unlock(p2m);
 }
 
+mfn_t gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
+                    p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
+                    unsigned int *page_order)
+{
+    mfn_t mfn;
+
+    if ( !p2m || !paging_mode_translate(p2m->domain) )
+    {
+        /* Not necessarily true, but for non-translated guests, we claim
+         * it's the most generic kind of memory */
+        *t = p2m_ram_rw;
+        return _mfn(gfn);
+    }
+
+    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
+
+#ifdef __x86_64__
+    if ( q == p2m_unshare && p2m_is_shared(*t) )
+    {
+        ASSERT(!p2m_is_nestedp2m(p2m));
+        mem_sharing_unshare_page(p2m->domain, gfn, 0);
+        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
+    }
+#endif
+
+#ifdef __x86_64__
+    if (unlikely((p2m_is_broken(*t))))
+    {
+        /* Return invalid_mfn to avoid caller's access */
+        mfn = _mfn(INVALID_MFN);
+        if (q == p2m_guest)
+            domain_crash(p2m->domain);
+    }
+#endif
+
+    return mfn;
+}
+
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
diff -r 3e726d259039 -r d0bbe0622d1d xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Nov 10 11:12:35 2011 +0000
@@ -313,45 +313,9 @@
  * If the lookup succeeds, the return value is != INVALID_MFN and 
  * *page_order is filled in with the order of the superpage (if any) that
  * the entry was found in.  */
-static inline mfn_t
-gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
+mfn_t gfn_to_mfn_type_p2m(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
-                    unsigned int *page_order)
-{
-    mfn_t mfn;
-
-    if ( !p2m || !paging_mode_translate(p2m->domain) )
-    {
-        /* Not necessarily true, but for non-translated guests, we claim
-         * it's the most generic kind of memory */
-        *t = p2m_ram_rw;
-        return _mfn(gfn);
-    }
-
-    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
-
-#ifdef __x86_64__
-    if ( q == p2m_unshare && p2m_is_shared(*t) )
-    {
-        ASSERT(!p2m_is_nestedp2m(p2m));
-        mem_sharing_unshare_page(p2m->domain, gfn, 0);
-        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
-    }
-#endif
-
-#ifdef __x86_64__
-    if (unlikely((p2m_is_broken(*t))))
-    {
-        /* Return invalid_mfn to avoid caller's access */
-        mfn = _mfn(INVALID_MFN);
-        if (q == p2m_guest)
-            domain_crash(p2m->domain);
-    }
-#endif
-
-    return mfn;
-}
-
+                    unsigned int *page_order);
 
 /* General conversion function from gfn to mfn */
 static inline mfn_t gfn_to_mfn_type(struct domain *d,
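
For reference, a caller-side lookup through the out-of-line accessor
might look as follows. This is a minimal sketch: lookup_ram_gfn() is a
hypothetical helper, p2m_get_hostp2m() and the p2m_query lookup type
are assumed from the surrounding p2m interface of this era, and the
NULL page_order means the caller does not care about superpage sizes:

    /* Hypothetical helper: is this gfn ordinary RAM, and at which mfn? */
    static mfn_t lookup_ram_gfn(struct domain *d, unsigned long gfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);
        p2m_type_t t;
        p2m_access_t a;
        mfn_t mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query, NULL);

        /* Per the comment above: a failed lookup returns INVALID_MFN. */
        if ( mfn_x(mfn) == INVALID_MFN || !p2m_is_ram(t) )
            return _mfn(INVALID_MFN);

        return mfn;
    }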

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog