[Xen-changelog] [xen-unstable] x86/mm: Introduce get_page_from_gfn().


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Fri, 18 May 2012 17:11:15 +0000
  • Delivery-date: Fri, 18 May 2012 17:11:20 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1337246694 -3600
# Node ID 94f50850e650895f794b21eac7d8c01b8f336fbc
# Parent  278328168798533d9a9c1dfc18b984c08ba64443
x86/mm: Introduce get_page_from_gfn().

This new function does a p2m lookup under the read lock, falling back
to the write lock only if it needs to make a change.  If the GFN is
backed by RAM, it takes a refcount on the underlying page.

The following patches will convert many paths that currently use
get_gfn/put_gfn to use the new interface.  That will avoid serializing
p2m accesses in the common case where no updates are needed (i.e. no
page-sharing, VM paging or other p2m trickery).

Signed-off-by: Tim Deegan <tim@xxxxxxx>
---
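
As a rough sketch of the conversion the message describes: a caller that
currently brackets its access with get_gfn/put_gfn (holding the p2m entry,
and hence the lock, across the whole operation) would move to
get_page_from_gfn() plus put_page().  The copy_to_guest_gfn_*() helpers
below are hypothetical and simplified; only the p2m and mapping interfaces
they call come from the existing tree and this patch.

/* Hypothetical caller, before: the p2m entry (and lock discipline) is
 * held across the whole copy. */
static int copy_to_guest_gfn_old(struct domain *d, unsigned long gfn,
                                 const void *buf, unsigned int len)
{
    p2m_type_t t;
    mfn_t mfn = get_gfn(d, gfn, &t);
    int rc = -EINVAL;

    if ( p2m_is_ram(t) && mfn_valid(mfn) )
    {
        void *p = map_domain_page(mfn_x(mfn));
        memcpy(p, buf, len);
        unmap_domain_page(p);
        rc = 0;
    }
    put_gfn(d, gfn);
    return rc;
}

/* Hypothetical caller, after: a page reference is taken under the p2m
 * read lock, and the lock is dropped before the page is touched. */
static int copy_to_guest_gfn_new(struct domain *d, unsigned long gfn,
                                 const void *buf, unsigned int len)
{
    p2m_type_t t;
    struct page_info *page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);

    if ( !page )
        return -EINVAL;

    {
        void *p = __map_domain_page(page);
        memcpy(p, buf, len);
        unmap_domain_page(p);
    }
    put_page(page);
    return 0;
}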


diff -r 278328168798 -r 94f50850e650 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Thu May 17 10:24:53 2012 +0100
+++ b/xen/arch/x86/mm/p2m.c     Thu May 17 10:24:54 2012 +0100
@@ -207,6 +207,59 @@ void __put_gfn(struct p2m_domain *p2m, u
     gfn_unlock(p2m, gfn, 0);
 }
 
+/* Atomically look up a GFN and take a reference count on the backing page. */
+struct page_info *get_page_from_gfn_p2m(
+    struct domain *d, struct p2m_domain *p2m, unsigned long gfn,
+    p2m_type_t *t, p2m_access_t *a, p2m_query_t q)
+{
+    struct page_info *page = NULL;
+    p2m_access_t _a;
+    p2m_type_t _t;
+    mfn_t mfn;
+
+    /* Allow t or a to be NULL */
+    t = t ?: &_t;
+    a = a ?: &_a;
+
+    if ( likely(!p2m_locked_by_me(p2m)) )
+    {
+        /* Fast path: look up and get out */
+        p2m_read_lock(p2m);
+        mfn = __get_gfn_type_access(p2m, gfn, t, a, 0, NULL, 0);
+        if ( (p2m_is_ram(*t) || p2m_is_grant(*t))
+             && mfn_valid(mfn)
+             && !((q & P2M_UNSHARE) && p2m_is_shared(*t)) )
+        {
+            page = mfn_to_page(mfn);
+            if ( !get_page(page, d)
+                 /* Page could be shared */
+                 && !get_page(page, dom_cow) )
+                page = NULL;
+        }
+        p2m_read_unlock(p2m);
+
+        if ( page )
+            return page;
+
+        /* Error path: not a suitable GFN at all */
+        if ( !p2m_is_ram(*t) && !p2m_is_paging(*t) && !p2m_is_magic(*t) )
+            return NULL;
+    }
+
+    /* Slow path: take the write lock and do fixups */
+    mfn = get_gfn_type_access(p2m, gfn, t, a, q, NULL);
+    if ( p2m_is_ram(*t) && mfn_valid(mfn) )
+    {
+        page = mfn_to_page(mfn);
+        if ( !get_page(page, d) )
+            page = NULL;
+    }
+    put_gfn(d, gfn);
+
+    return page;
+}
+
+
 int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
diff -r 278328168798 -r 94f50850e650 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 17 10:24:53 2012 +0100
+++ b/xen/include/asm-x86/p2m.h Thu May 17 10:24:54 2012 +0100
@@ -377,6 +377,33 @@ static inline mfn_t get_gfn_query_unlock
     return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
 }
 
+/* Atomically look up a GFN and take a reference count on the backing page.
+ * This makes sure the page doesn't get freed (or shared) underfoot,
+ * and should be used by any path that intends to write to the backing page.
+ * Returns NULL if the page is not backed by RAM.
+ * The caller is responsible for calling put_page() afterwards. */
+struct page_info *get_page_from_gfn_p2m(struct domain *d,
+                                        struct p2m_domain *p2m,
+                                        unsigned long gfn,
+                                        p2m_type_t *t, p2m_access_t *a,
+                                        p2m_query_t q);
+
+static inline struct page_info *get_page_from_gfn(
+    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+    struct page_info *page;
+
+    if ( paging_mode_translate(d) )
+        return get_page_from_gfn_p2m(d, p2m_get_hostp2m(d), gfn, t, NULL, q);
+
+    /* Non-translated guests see 1-1 RAM mappings everywhere */
+    if ( t )
+        *t = p2m_ram_rw;
+    page = __mfn_to_page(gfn);
+    return get_page(page, d) ? page : NULL;
+}
+
+
 /* General conversion function from mfn to gfn */
 static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
 {

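The header comment spells out the contract: the reference pins the backing
page against being freed or shared underfoot, and the caller must drop it
with put_page() once done.  A minimal, hypothetical helper that pins a
guest frame before writing to it might look like the following; only
pin_writable_gfn() itself is invented here.

/* Hypothetical helper built on the interface above: pin a guest frame
 * for writing, failing if the GFN is not backed by writable RAM. */
static struct page_info *pin_writable_gfn(struct domain *d,
                                          unsigned long gfn)
{
    p2m_type_t t;
    struct page_info *page;

    /* Ask for P2M_UNSHARE: we intend to write, so a shared frame must
     * be replaced with a private copy first. */
    page = get_page_from_gfn(d, gfn, &t, P2M_UNSHARE);
    if ( !page )
        return NULL;        /* not backed by RAM (or ref not obtained) */

    if ( p2m_is_readonly(t) )
    {
        /* RAM, but not writable for this guest: drop the ref. */
        put_page(page);
        return NULL;
    }

    /* Success: the caller must put_page(page) when finished. */
    return page;
}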