
[Xen-changelog] [xen-unstable] x86: make get_page_from_l1e() return a proper error code



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1299687371 0
# Node ID e9fab50d7b61d151d51a4b1088930c9e1ca2da47
# Parent  5f28dcea13555f7ab948c9cb95de3e79e0fbfc4b
x86: make get_page_from_l1e() return a proper error code

... so that the guest can actually know the reason for the (hypercall)
failure.

ptwr_do_page_fault() could propagate the error indicator received from
get_page_from_l1e() back to the guest in the high half of the error
code (entry_vector), provided we're sure all existing guests can deal
with that (or indicate so by means of a to-be-added guest feature
flag). Alternatively, a second virtual status register (like CR2)
could be introduced.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
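For reference, the new return convention of get_page_from_l1e() is: a
negative -Exxx value on failure, 0 on success, and 1 on success where the
mapping must be forced read-only (previously 0 / 1 / -1 respectively).
A minimal, self-contained sketch of the caller pattern -- not part of the
patch, and using stand-in names -- looks like this:

    /*
     * Sketch only: get_page_from_l1e_stub() stands in for the real Xen
     * function; it now returns -Exxx on failure, 0 on success, or 1 when
     * the caller must strip _PAGE_RW from the entry.
     */
    #include <errno.h>
    #include <stdio.h>

    static int get_page_from_l1e_stub(int present, int bad_flags)
    {
        if ( !present )
            return 0;           /* not-present entries always succeed */
        if ( bad_flags )
            return -EINVAL;     /* the reason is now visible to callers */
        return 0;
    }

    static int map_one_entry(int present, int bad_flags)
    {
        int rc = get_page_from_l1e_stub(present, bad_flags);

        switch ( rc )
        {
        default:                /* rc < 0: propagate the errno upwards */
            return rc;
        case 1:                 /* succeeded, but force read-only */
            /* l1e_remove_flags(e, _PAGE_RW); */
            /* fall through */
        case 0:
            return 0;
        }
    }

    int main(void)
    {
        printf("bad flags -> %d\n", map_one_entry(1, 1)); /* -EINVAL */
        printf("present   -> %d\n", map_one_entry(1, 0)); /* 0 */
        return 0;
    }

This mirrors the switch statements in alloc_l1_table(), mod_l1_entry() and
ptwr_emulated_update() below, which now treat the default (negative) case
as the error path instead of case 0.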

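The propagation idea in the description is intentionally left out of this
patch.  Purely as an illustration of the encoding it hints at -- the helper
name and layout below are hypothetical, not existing Xen API -- it might
look like:

    /*
     * Hypothetical sketch only, NOT implemented here: fold the -Exxx value
     * from get_page_from_l1e() into the high half of the page-fault error
     * code, assuming the guest has opted in via a to-be-added feature flag.
     */
    static unsigned int pack_ptwr_errno(unsigned int error_code, int rc)
    {
        if ( rc < 0 )
            error_code |= (unsigned int)(-rc) << 16;
        return error_code;
    }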

diff -r 5f28dcea1355 -r e9fab50d7b61 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Mar 09 16:15:36 2011 +0000
+++ b/xen/arch/x86/mm.c Wed Mar 09 16:16:11 2011 +0000
@@ -799,12 +799,12 @@
     bool_t write;
 
     if ( !(l1f & _PAGE_PRESENT) )
-        return 1;
+        return 0;
 
     if ( unlikely(l1f & l1_disallow_mask(l1e_owner)) )
     {
         MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(l1e_owner));
-        return 0;
+        return -EINVAL;
     }
 
     if ( !mfn_valid(mfn) ||
@@ -821,18 +821,21 @@
         if ( !iomem_access_permitted(pg_owner, mfn, mfn) )
         {
             if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */
+            {
                 MEM_LOG("Non-privileged (%u) attempt to map I/O space %08lx", 
                         pg_owner->domain_id, mfn);
-            return 0;
+                return -EPERM;
+            }
+            return -EINVAL;
         }
 
         if ( !(l1f & _PAGE_RW) || IS_PRIV(pg_owner) ||
              !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
-            return 1;
+            return 0;
         dprintk(XENLOG_G_WARNING,
                 "d%d: Forcing read-only access to MFN %lx\n",
                 l1e_owner->domain_id, mfn);
-        return -1;
+        return 1;
     }
 
     if ( unlikely(real_pg_owner != pg_owner) )
@@ -863,6 +866,7 @@
     {
         unsigned long x, nx, y = page->count_info;
         unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
+        int err;
 
         if ( is_xen_heap_page(page) )
         {
@@ -870,7 +874,7 @@
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
-            return 0;
+            return -EACCES;
         }
 
         do {
@@ -878,7 +882,8 @@
             nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
         } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
 
-        if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+        err = update_xen_mappings(mfn, cacheattr);
+        if ( unlikely(err) )
         {
             cacheattr = y & PGC_cacheattr_mask;
             do {
@@ -894,11 +899,11 @@
                     " from L1 entry %" PRIpte ") for %d",
                     mfn, get_gpfn_from_mfn(mfn),
                     l1e_get_intpte(l1e), l1e_owner->domain_id);
-            return 0;
+            return err;
         }
     }
 
-    return 1;
+    return 0;
 
  could_not_pin:
     MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte
@@ -907,7 +912,7 @@
             l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id);
     if ( real_pg_owner != NULL )
         put_page(page);
-    return 0;
+    return -EBUSY;
 }
 
 
@@ -1197,17 +1202,20 @@
     unsigned long  pfn = page_to_mfn(page);
     l1_pgentry_t  *pl1e;
     unsigned int   i;
+    int            ret = 0;
 
     pl1e = map_domain_page(pfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
         if ( is_guest_l1_slot(i) )
-            switch ( get_page_from_l1e(pl1e[i], d, d) )
+            switch ( ret = get_page_from_l1e(pl1e[i], d, d) )
             {
+            default:
+                goto fail;
             case 0:
-                goto fail;
-            case -1:
+                break;
+            case 1:
                 l1e_remove_flags(pl1e[i], _PAGE_RW);
                 break;
             }
@@ -1225,7 +1233,7 @@
             put_page_from_l1e(pl1e[i], d);
 
     unmap_domain_page(pl1e);
-    return -EINVAL;
+    return ret;
 }
 
 static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
@@ -1794,11 +1802,13 @@
             return rc;
         }
 
-        switch ( get_page_from_l1e(nl1e, pt_dom, pg_dom) )
+        switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
         {
+        default:
+            return 0;
         case 0:
-            return 0;
-        case -1:
+            break;
+        case 1:
             l1e_remove_flags(nl1e, _PAGE_RW);
             break;
         }
@@ -4948,7 +4958,7 @@
     nl1e = l1e_from_intpte(val);
     switch ( get_page_from_l1e(nl1e, d, d) )
     {
-    case 0:
+    default:
         if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
              !do_cmpxchg && (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
         {
@@ -4968,7 +4978,9 @@
             return X86EMUL_UNHANDLEABLE;
         }
         break;
-    case -1:
+    case 0:
+        break;
+    case 1:
         l1e_remove_flags(nl1e, _PAGE_RW);
         break;
     }
diff -r 5f28dcea1355 -r e9fab50d7b61 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Mar 09 16:15:36 2011 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Mar 09 16:16:11 2011 +0000
@@ -872,7 +872,7 @@
     // If a privileged domain is attempting to install a map of a page it does
     // not own, we let it succeed anyway.
     //
-    if ( unlikely(!res) &&
+    if ( unlikely(res < 0) &&
          !shadow_mode_translate(d) &&
          mfn_valid(mfn = shadow_l1e_get_mfn(sl1e)) &&
          (owner = page_get_owner(mfn_to_page(mfn))) &&
@@ -883,11 +883,11 @@
         SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
                        "which is owned by domain %d: %s\n",
                        d->domain_id, mfn_x(mfn), owner->domain_id,
-                       res ? "success" : "failed");
+                       res >= 0 ? "success" : "failed");
     }
 
     /* Okay, it might still be a grant mapping PTE.  Try it. */
-    if ( unlikely(!res) &&
+    if ( unlikely(res < 0) &&
          (type == p2m_grant_map_rw ||
           (type == p2m_grant_map_ro &&
            !(shadow_l1e_get_flags(sl1e) & _PAGE_RW))) )
@@ -900,7 +900,7 @@
             res = get_page_from_l1e(sl1e, d, page_get_owner(mfn_to_page(mfn)));
     }
 
-    if ( unlikely(!res) )
+    if ( unlikely(res < 0) )
     {
         perfc_incr(shadow_get_page_fail);
         SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
@@ -1229,15 +1229,15 @@
             TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
             switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
             {
-            case 0:
+            default:
                 /* Doesn't look like a pagetable. */
                 flags |= SHADOW_SET_ERROR;
                 new_sl1e = shadow_l1e_empty();
                 break;
-            case -1:
+            case 1:
                 shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
                 /* fall through */
-            default:
+            case 0:
                 shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
                 break;
             }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog