
[Xen-changelog] [xen-unstable] x86: Clean up get_page_from_l1e() to correctly distinguish between owner-of-pte and owner-of-data-page in all cases.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1244036434 -3600
# Node ID c4b048ce6a4b61111382ce37c6afaeed0234282e
# Parent  07cf79dfb59c8a8eb03b7da9fdf9860f3da8e5cc
x86: Clean up get_page_from_l1e() to correctly distinguish between
owner-of-pte and owner-of-data-page in all cases.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/mm.c              |   81 ++++++++++++++++++++---------------------
 xen/arch/x86/mm/shadow/multi.c |    4 +-
 xen/include/asm-x86/mm.h       |    5 +-
 3 files changed, 46 insertions(+), 44 deletions(-)
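
For readers skimming the interface change: the old get_page_from_l1e() took a
single "struct domain *d" that conflated the domain owning the page-table
entry with the domain expected to own the data page. The patch splits these
into l1e_owner and pg_owner. The sketch below is a minimal, hypothetical
illustration of the caller patterns visible in this patch (simplified
stand-in types, not Xen code):

    /* Hypothetical sketch of the new calling convention; the struct and
     * function here are simplified stand-ins, not Xen's real types. */
    #include <stdio.h>

    struct domain { int domain_id; };

    /* New signature: the PTE owner and the expected page owner are
     * passed separately instead of being folded into one argument. */
    static int get_page_from_l1e_sketch(unsigned long l1e,
                                        struct domain *l1e_owner,
                                        struct domain *pg_owner)
    {
        printf("l1e %#lx: PTE owned by dom%d, page expected in dom%d\n",
               l1e, l1e_owner->domain_id, pg_owner->domain_id);
        return 1;
    }

    int main(void)
    {
        struct domain d = { .domain_id = 7 }, foreigndom = { .domain_id = 0 };

        /* alloc_l1_table()/ptwr_emulated_update(): a domain maps a page
         * it is expected to own itself -> (pl1e[i], d, d). */
        get_page_from_l1e_sketch(0x1000, &d, &d);

        /* mod_l1_entry(): the PTE lives in d, but the target page may
         * belong to FOREIGNDOM -> (nl1e, d, FOREIGNDOM). */
        get_page_from_l1e_sketch(0x2000, &d, &foreigndom);

        return 0;
    }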

diff -r 07cf79dfb59c -r c4b048ce6a4b xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jun 03 12:59:44 2009 +0100
+++ b/xen/arch/x86/mm.c Wed Jun 03 14:40:34 2009 +0100
@@ -726,66 +726,69 @@ static void update_xen_mappings(unsigned
 #endif
 }
 
-
 int
 get_page_from_l1e(
-    l1_pgentry_t l1e, struct domain *d)
+    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner)
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
     uint32_t l1f = l1e_get_flags(l1e);
     struct vcpu *curr = current;
-    struct domain *owner;
+    struct domain *real_pg_owner;
 
     if ( !(l1f & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1f & l1_disallow_mask(d)) )
-    {
-        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
+    if ( unlikely(l1f & l1_disallow_mask(l1e_owner)) )
+    {
+        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(l1e_owner));
         return 0;
     }
 
     if ( !mfn_valid(mfn) ||
-         (owner = page_get_owner_and_reference(page)) == dom_io )
+         (real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
     {
         /* Only needed the reference to confirm dom_io ownership. */
         if ( mfn_valid(mfn) )
             put_page(page);
 
         /* DOMID_IO reverts to caller for privilege checks. */
-        if ( d == dom_io )
-            d = curr->domain;
-
-        if ( !iomem_access_permitted(d, mfn, mfn) )
+        if ( pg_owner == dom_io )
+            pg_owner = curr->domain;
+
+        if ( !iomem_access_permitted(pg_owner, mfn, mfn) )
         {
             if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */
                 MEM_LOG("Non-privileged (%u) attempt to map I/O space %08lx", 
-                        d->domain_id, mfn);
+                        pg_owner->domain_id, mfn);
             return 0;
         }
 
         return 1;
     }
 
-    if ( owner == NULL )
+    if ( real_pg_owner == NULL )
         goto could_not_pin;
 
-    /*
-     * Let privileged domains transfer the right to map their target
-     * domain's pages. This is used to allow stub-domain pvfb export to dom0,
-     * until pvfb supports granted mappings. At that time this minor hack
-     * can go away.
-     */
-    if ( unlikely(d != owner) && (d != curr->domain) && IS_PRIV_FOR(d, owner) )
-        d = owner;
+    if ( unlikely(real_pg_owner != pg_owner) )
+    {
+        /*
+         * Let privileged domains transfer the right to map their target
+         * domain's pages. This is used to allow stub-domain pvfb export to
+         * dom0, until pvfb supports granted mappings. At that time this
+         * minor hack can go away.
+         */
+        if ( (pg_owner == l1e_owner) || !IS_PRIV_FOR(pg_owner, real_pg_owner) )
+            goto could_not_pin;
+        pg_owner = real_pg_owner;
+    }
 
     /* Foreign mappings into guests in shadow external mode don't
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
     if ( (l1f & _PAGE_RW) &&
-         !(paging_mode_external(d) && (d != curr->domain)) &&
+         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
          !get_page_type(page, PGT_writable_page) )
         goto could_not_pin;
 
@@ -798,8 +801,7 @@ get_page_from_l1e(
         if ( is_xen_heap_page(page) )
         {
             if ( (l1f & _PAGE_RW) &&
-                 !(unlikely(paging_mode_external(d) &&
-                            (d != curr->domain))) )
+                 ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
@@ -820,10 +822,10 @@ get_page_from_l1e(
 
  could_not_pin:
     MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte
-            " for dom%d",
+            " for l1e_owner=%d, pg_owner=%d",
             mfn, get_gpfn_from_mfn(mfn),
-            l1e_get_intpte(l1e), d->domain_id);
-    if ( owner != NULL )
+            l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id);
+    if ( real_pg_owner != NULL )
         put_page(page);
     return 0;
 }
@@ -996,19 +998,18 @@ get_page_from_l4e(
 #define unadjust_guest_l3e(_p, _d) ((void)(_d))
 #endif
 
-void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
+void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 {
     unsigned long     pfn = l1e_get_pfn(l1e);
     struct page_info *page;
-    struct domain    *e;
+    struct domain    *pg_owner;
     struct vcpu      *v;
 
     if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(pfn) )
         return;
 
     page = mfn_to_page(pfn);
-
-    e = page_get_owner(page);
+    pg_owner = page_get_owner(page);
 
     /*
      * Check if this is a mapping that was established via a grant reference.
@@ -1024,17 +1025,17 @@ void put_page_from_l1e(l1_pgentry_t l1e,
      * Xen. All active grants can safely be cleaned up when the domain dies.)
      */
     if ( (l1e_get_flags(l1e) & _PAGE_GNTTAB) &&
-         !d->is_shutting_down && !d->is_dying )
+         !l1e_owner->is_shutting_down && !l1e_owner->is_dying )
     {
         MEM_LOG("Attempt to implicitly unmap a granted PTE %" PRIpte,
                 l1e_get_intpte(l1e));
-        domain_crash(d);
+        domain_crash(l1e_owner);
     }
 
     /* Remember we didn't take a type-count of foreign writable mappings
      * to paging-external domains */
     if ( (l1e_get_flags(l1e) & _PAGE_RW) && 
-         !(unlikely((e != d) && paging_mode_external(e))) )
+         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
     {
         put_page_and_type(page);
     }
@@ -1044,9 +1045,9 @@ void put_page_from_l1e(l1_pgentry_t l1e,
         if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) == 
                        PGT_seg_desc_page)) &&
              unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) &&
-             (d == e) )
-        {
-            for_each_vcpu ( d, v )
+             (l1e_owner == pg_owner) )
+        {
+            for_each_vcpu ( pg_owner, v )
                 invalidate_shadow_ldt(v, 1);
         }
         put_page(page);
@@ -1137,7 +1138,7 @@ static int alloc_l1_table(struct page_in
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
         if ( is_guest_l1_slot(i) &&
-             unlikely(!get_page_from_l1e(pl1e[i], d)) )
+             unlikely(!get_page_from_l1e(pl1e[i], d, d)) )
             goto fail;
 
         adjust_guest_l1e(pl1e[i], d);
@@ -1716,7 +1717,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
             return rc;
         }
 
-        if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
+        if ( unlikely(!get_page_from_l1e(nl1e, d, FOREIGNDOM)) )
             return 0;
         
         adjust_guest_l1e(nl1e, d);
@@ -4224,7 +4225,7 @@ static int ptwr_emulated_update(
 
     /* Check the new PTE. */
     nl1e = l1e_from_intpte(val);
-    if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+    if ( unlikely(!get_page_from_l1e(nl1e, d, d)) )
     {
         if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
              !do_cmpxchg && (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
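
An aside on the reworked ownership check in the mm.c hunks above: a mismatch
between the claimed pg_owner and the page's real owner now fails unless the
caller explicitly named a foreign pg_owner that is privileged over the real
owner (the stub-domain pvfb case mentioned in the comment). A standalone
sketch of that decision, with IS_PRIV_FOR() stubbed out as an assumption:

    /* Hypothetical sketch of the tightened ownership test; is_priv_for()
     * is a simplified stand-in for Xen's IS_PRIV_FOR() macro. */
    #include <assert.h>
    #include <stdbool.h>

    struct domain { bool privileged; };

    /* Assumption: a privileged domain may act on behalf of any other. */
    static bool is_priv_for(const struct domain *priv,
                            const struct domain *target)
    {
        (void)target;
        return priv->privileged;
    }

    static bool ownership_ok(const struct domain *l1e_owner,
                             const struct domain *pg_owner,
                             const struct domain *real_pg_owner)
    {
        if (real_pg_owner == pg_owner)
            return true;  /* the page belongs to whom the caller claimed */
        /* Mismatch: only allowed for an explicitly foreign mapping by a
         * domain privileged over the page's real owner. */
        return (pg_owner != l1e_owner) && is_priv_for(pg_owner, real_pg_owner);
    }

    int main(void)
    {
        struct domain dom0  = { true };
        struct domain stub  = { false };
        struct domain guest = { false };

        /* A domain mapping its own page is always fine. */
        assert(ownership_ok(&guest, &guest, &guest));
        /* Explicit foreign claim by a privileged domain: allowed. */
        assert(ownership_ok(&stub, &dom0, &guest));
        /* Claimed owner equals the PTE owner but the page really
         * belongs elsewhere: refused. */
        assert(!ownership_ok(&guest, &guest, &stub));
        return 0;
    }
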
diff -r 07cf79dfb59c -r c4b048ce6a4b xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Jun 03 12:59:44 2009 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Jun 03 14:40:34 2009 +0100
@@ -816,7 +816,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
     if ( !shadow_mode_refcounts(d) )
         return 1;
 
-    res = get_page_from_l1e(sl1e, d);
+    res = get_page_from_l1e(sl1e, d, d);
 
     // If a privileged domain is attempting to install a map of a page it does
     // not own, we let it succeed anyway.
@@ -828,7 +828,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
          (d != owner) &&
          IS_PRIV_FOR(d, owner))
     {
-        res = get_page_from_l1e(sl1e, owner);
+        res = get_page_from_l1e(sl1e, d, owner);
         SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
                        "which is owned by domain %d: %s\n",
                        d->domain_id, mfn_x(mfn), owner->domain_id,
diff -r 07cf79dfb59c -r c4b048ce6a4b xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Wed Jun 03 12:59:44 2009 +0100
+++ b/xen/include/asm-x86/mm.h  Wed Jun 03 14:40:34 2009 +0100
@@ -285,8 +285,9 @@ int  get_page_type(struct page_info *pag
 int  get_page_type(struct page_info *page, unsigned long type);
 int  put_page_type_preemptible(struct page_info *page);
 int  get_page_type_preemptible(struct page_info *page, unsigned long type);
-int  get_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
-void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d);
+int  get_page_from_l1e(
+    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
+void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);
 
 static inline void put_page_and_type(struct page_info *page)
 {
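
Finally, note that after this change the writable type-count condition reads
identically in get_page_from_l1e() and put_page_from_l1e(), which makes the
get/put pairing easy to audit. A minimal sketch of that shared predicate
(hypothetical; paging_mode_external() is reduced to a boolean flag here):

    /* Hypothetical sketch of the shared writable-refcount predicate;
     * not Xen code, simplified for illustration. */
    #include <assert.h>
    #include <stdbool.h>

    #define _PAGE_RW 0x002u  /* matches the x86 PTE R/W bit */

    struct domain { bool paging_external; };

    /* A writable mapping takes (and must later drop) a PGT_writable_page
     * type count unless it is a foreign mapping into a paging-external
     * domain, e.g. qemu-dm in dom0 mapping an HVM guest's memory. */
    static bool takes_writable_typecount(unsigned int l1f,
                                         const struct domain *l1e_owner,
                                         const struct domain *pg_owner)
    {
        return (l1f & _PAGE_RW) &&
               ((l1e_owner == pg_owner) || !pg_owner->paging_external);
    }

    int main(void)
    {
        struct domain dom0 = { .paging_external = false };
        struct domain hvm  = { .paging_external = true };

        /* A domain's own writable mapping is counted. */
        assert(takes_writable_typecount(_PAGE_RW, &hvm, &hvm));
        /* A foreign writable map of an external-mode guest is not. */
        assert(!takes_writable_typecount(_PAGE_RW, &dom0, &hvm));
        /* Read-only mappings never take the writable type count. */
        assert(!takes_writable_typecount(0, &dom0, &dom0));
        return 0;
    }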

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog