[Xen-changelog] [xen-4.0-testing] x86: run-time callers of map_pages_to_xen() must check for errors



# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1300197431 0
# Node ID 999693539839e62e7604e348f2dbe955b0b71d0f
# Parent  80e6045111e1ff7e364b8d381bf456edb9ad2a0f
x86: run-time callers of map_pages_to_xen() must check for errors

Again, errors (here, out-of-memory conditions) must not cause
hypervisor crashes, and hence ought to be propagated to the callers.
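
For illustration, the pattern now applied at each run-time call site
is roughly the following (a minimal sketch; the concrete recovery
action varies per caller, as the hunks below show):

    int err = map_pages_to_xen(va, mfn, nr_pages, flags);

    if ( err )      /* e.g. -ENOMEM while allocating page tables */
        return err; /* propagate instead of crashing */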

This also adjusts the cache attribute changing loop in
get_page_from_l1e() so that it does not go through an unnecessary
iteration. While this could be considered mere cleanup, it is actually
a prerequisite for the error recovery path that now becomes necessary.
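
The reshaped loop, with the rationale spelled out (compare the first
mm.c hunk below):

    /* Update the cache attribute bits in count_info atomically. */
    do {
        x  = y;    /* y holds the latest observed count_info */
        nx = (x & ~PGC_cacheattr_mask) |
             (cacheattr << PGC_cacheattr_base);
    } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
    /*
     * Exits right when the cmpxchg succeeds, without the extra
     * (failing) pass the old while loop made, and leaves the
     * pre-update value in y, which the new error recovery path
     * needs in order to restore the old attributes.
     */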

Also make a few functions static, which eases checking for potential
callers that need adjustment.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset:   22997:5f28dcea1355
xen-unstable date:        Wed Mar 09 16:15:36 2011 +0000

x86: don't BUG() post-boot in alloc_xen_pagetable()

Instead, propagate the condition to the callers, all of which also get
adjusted to check for that situation.
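
The resulting caller-side pattern (a sketch; virt_to_xen_l3e() and
virt_to_xen_l2e() in the x86_64 hunks below follow it):

    l3_pgentry_t *pl3e = alloc_xen_pagetable();

    if ( !pl3e )     /* post-boot allocation failure */
        return NULL; /* propagate instead of BUG() */
    clear_page(pl3e);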

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
xen-unstable changeset:   22996:1eeccafe9042
xen-unstable date:        Wed Mar 09 16:14:59 2011 +0000
---


diff -r 80e6045111e1 -r 999693539839 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Mar 14 17:21:52 2011 +0000
+++ b/xen/arch/x86/mm.c Tue Mar 15 13:57:11 2011 +0000
@@ -744,8 +744,9 @@
     return (page_get_owner(page) == dom_io);
 }
 
-static void update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
+static int update_xen_mappings(unsigned long mfn, unsigned long cacheattr)
 {
+    int err = 0;
 #ifdef __x86_64__
     bool_t alias = mfn >= PFN_DOWN(xen_phys_start) &&
          mfn < PFN_UP(xen_phys_start + (unsigned long)_end - XEN_VIRT_START);
@@ -753,12 +754,14 @@
         XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
 
     if ( unlikely(alias) && cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, 0);
-    map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+        err = map_pages_to_xen(xen_va, mfn, 1, 0);
+    if ( !err )
+        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                      PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
-    if ( unlikely(alias) && !cacheattr )
-        map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
+    if ( unlikely(alias) && !cacheattr && !err )
+        err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
 #endif
+    return err;
 }
 
 int
@@ -770,6 +773,7 @@
     uint32_t l1f = l1e_get_flags(l1e);
     struct vcpu *curr = current;
     struct domain *real_pg_owner;
+    bool_t write;
 
     if ( !(l1f & _PAGE_PRESENT) )
         return 1;
@@ -820,9 +824,9 @@
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    if ( (l1f & _PAGE_RW) &&
-         ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) &&
-         !get_page_type(page, PGT_writable_page) )
+    write = (l1f & _PAGE_RW) &&
+            ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner));
+    if ( write && !get_page_type(page, PGT_writable_page) )
         goto could_not_pin;
 
     if ( pte_flags_to_cacheattr(l1f) !=
@@ -833,22 +837,36 @@
 
         if ( is_xen_heap_page(page) )
         {
-            if ( (l1f & _PAGE_RW) &&
-                 ((l1e_owner == pg_owner) || !paging_mode_external(pg_owner)) )
+            if ( write )
                 put_page_type(page);
             put_page(page);
             MEM_LOG("Attempt to change cache attributes of Xen heap page");
             return 0;
         }
 
-        while ( ((y & PGC_cacheattr_mask) >> PGC_cacheattr_base) != cacheattr )
-        {
+        do {
             x  = y;
             nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
-            y  = cmpxchg(&page->count_info, x, nx);
+        } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+        if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+        {
+            cacheattr = y & PGC_cacheattr_mask;
+            do {
+                x  = y;
+                nx = (x & ~PGC_cacheattr_mask) | cacheattr;
+            } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
+
+            if ( write )
+                put_page_type(page);
+            put_page(page);
+
+            MEM_LOG("Error updating mappings for mfn %lx (pfn %lx,"
+                    " from L1 entry %" PRIpte ") for %d",
+                    mfn, get_gpfn_from_mfn(mfn),
+                    l1e_get_intpte(l1e), l1e_owner->domain_id);
+            return 0;
         }
-
-        update_xen_mappings(mfn, cacheattr);
     }
 
     return 1;
@@ -1980,6 +1998,21 @@
 
 #endif
 
+static int cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr =
+        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
+
+    if ( likely(cacheattr == 0) )
+        return 0;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_page(page));
+
+    return update_xen_mappings(page_to_mfn(page), 0);
+}
+
 void put_page(struct page_info *page)
 {
     unsigned long nx, x, y = page->count_info;
@@ -1993,8 +2026,10 @@
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
     {
-        cleanup_page_cacheattr(page);
-        free_domheap_page(page);
+        if ( cleanup_page_cacheattr(page) == 0 )
+            free_domheap_page(page);
+        else
+            MEM_LOG("Leaking pfn %lx", page_to_mfn(page));
     }
 }
 
@@ -2446,21 +2481,6 @@
     return __get_page_type(page, type, 1);
 }
 
-void cleanup_page_cacheattr(struct page_info *page)
-{
-    uint32_t cacheattr =
-        (page->count_info & PGC_cacheattr_mask) >> PGC_cacheattr_base;
-
-    if ( likely(cacheattr == 0) )
-        return;
-
-    page->count_info &= ~PGC_cacheattr_mask;
-
-    BUG_ON(is_xen_heap_page(page));
-
-    update_xen_mappings(page_to_mfn(page), 0);
-}
-
 
 int new_guest_cr3(unsigned long mfn)
 {
@@ -4834,8 +4854,11 @@
     while ( nr_mfns != 0 )
     {
 #ifdef __x86_64__
-        l3_pgentry_t *pl3e = virt_to_xen_l3e(virt);
-        l3_pgentry_t ol3e = *pl3e;
+        l3_pgentry_t ol3e, *pl3e = virt_to_xen_l3e(virt);
+
+        if ( !pl3e )
+            return -ENOMEM;
+        ol3e = *pl3e;
 
         if ( cpu_has_page1gb &&
              !(((virt >> PAGE_SHIFT) | mfn) &
@@ -4955,6 +4978,8 @@
 #endif
 
         pl2e = virt_to_xen_l2e(virt);
+        if ( !pl2e )
+            return -ENOMEM;
 
         if ( ((((virt>>PAGE_SHIFT) | mfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
              (nr_mfns >= (1<<PAGETABLE_ORDER)) &&
diff -r 80e6045111e1 -r 999693539839 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Mon Mar 14 17:21:52 2011 +0000
+++ b/xen/arch/x86/x86_32/mm.c  Tue Mar 15 13:57:11 2011 +0000
@@ -48,7 +48,8 @@
     if ( !early_boot )
     {
         void *v = alloc_xenheap_page();
-        BUG_ON(v == NULL);
+
+        BUG_ON(!dom0 && !v);
         return v;
     }
 
diff -r 80e6045111e1 -r 999693539839 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Mon Mar 14 17:21:52 2011 +0000
+++ b/xen/arch/x86/x86_64/mm.c  Tue Mar 15 13:57:11 2011 +0000
@@ -80,8 +80,9 @@
     if ( !early_boot )
     {
         struct page_info *pg = alloc_domheap_page(NULL, 0);
-        BUG_ON(pg == NULL);
-        return page_to_virt(pg);
+
+        BUG_ON(!dom0 && !pg);
+        return pg ? page_to_virt(pg) : NULL;
     }
 
     mfn = alloc_boot_pages(1, 1);
@@ -96,6 +97,9 @@
     if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
     {
         l3_pgentry_t *pl3e = alloc_xen_pagetable();
+
+        if ( !pl3e )
+            return NULL;
         clear_page(pl3e);
         l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
     }
@@ -108,9 +112,15 @@
     l3_pgentry_t *pl3e;
 
     pl3e = virt_to_xen_l3e(v);
+    if ( !pl3e )
+        return NULL;
+
     if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
     {
         l2_pgentry_t *pl2e = alloc_xen_pagetable();
+
+        if ( !pl2e )
+            return NULL;
         clear_page(pl2e);
         l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
     }
@@ -425,6 +435,7 @@
     l3_pgentry_t *l3_ro_mpt = NULL;
     l2_pgentry_t *l2_ro_mpt = NULL;
     struct page_info *l1_pg;
+    int err = 0;
 
     smap = info->spfn & (~((1UL << (L2_PAGETABLE_SHIFT - 2)) -1));
 
@@ -475,24 +486,25 @@
         memflags = MEMF_node(phys_to_nid(i << PAGE_SHIFT));
 
         l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-        map_pages_to_xen(rwva,
-                    page_to_mfn(l1_pg),
-                    1UL << PAGETABLE_ORDER,
-                    PAGE_HYPERVISOR);
+        err = map_pages_to_xen(rwva, page_to_mfn(l1_pg),
+                               1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            break;
         memset((void *)rwva, 0x55, 1UL << L2_PAGETABLE_SHIFT);
         /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
         l2e_write(&l2_ro_mpt[l2_table_offset(va)], l2e_from_page(l1_pg,
                   _PAGE_PSE|_PAGE_PRESENT));
     }
 #undef CNT
 #undef MFN
-    return 0;
+    return err;
 }
 
 /*
  * Allocate and map the machine-to-phys table.
  * The L3 for RO/RWRW MPT and the L2 for compatible MPT should be setup already
  */
-int setup_m2p_table(struct mem_hotadd_info *info)
+static int setup_m2p_table(struct mem_hotadd_info *info)
 {
     unsigned long i, va, smap, emap;
     unsigned int n, memflags;
@@ -546,11 +558,13 @@
         else
         {
             l1_pg = mfn_to_page(alloc_hotadd_mfn(info));
-            map_pages_to_xen(
+            ret = map_pages_to_xen(
                         RDWR_MPT_VIRT_START + i * sizeof(unsigned long),
                         page_to_mfn(l1_pg),
                         1UL << PAGETABLE_ORDER,
                         PAGE_HYPERVISOR);
+            if ( ret )
+                goto error;
             memset((void *)(RDWR_MPT_VIRT_START + i * sizeof(unsigned long)),
                    0x55, 1UL << L2_PAGETABLE_SHIFT);
 
@@ -887,13 +901,13 @@
     flush_tlb_all();
 }
 
-/* Should we be paraniod failure in map_pages_to_xen? */
 static int setup_frametable_chunk(void *start, void *end,
                                   struct mem_hotadd_info *info)
 {
     unsigned long s = (unsigned long)start;
     unsigned long e = (unsigned long)end;
     unsigned long mfn;
+    int err;
 
     ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
     ASSERT(!(e & ((1 << L2_PAGETABLE_SHIFT) - 1)));
@@ -901,14 +915,17 @@
     for ( ; s < e; s += (1UL << L2_PAGETABLE_SHIFT))
     {
         mfn = alloc_hotadd_mfn(info);
-        map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
+        err = map_pages_to_xen(s, mfn, 1UL << PAGETABLE_ORDER,
+                               PAGE_HYPERVISOR);
+        if ( err )
+            return err;
     }
     memset(start, -1, s - (unsigned long)start);
 
     return 0;
 }
 
-int extend_frame_table(struct mem_hotadd_info *info)
+static int extend_frame_table(struct mem_hotadd_info *info)
 {
     unsigned long cidx, nidx, eidx, spfn, epfn;
 
@@ -929,12 +946,16 @@
 
     while ( cidx < eidx )
     {
+        int err;
+
         nidx = find_next_bit(pdx_group_valid, eidx, cidx);
         if ( nidx >= eidx )
             nidx = eidx;
-        setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
+        err = setup_frametable_chunk(pdx_to_page(cidx * PDX_GROUP_COUNT ),
                                      pdx_to_page(nidx * PDX_GROUP_COUNT),
                                      info);
+        if ( err )
+            return err;
 
         cidx = find_next_zero_bit(pdx_group_valid, eidx, nidx);
     }
diff -r 80e6045111e1 -r 999693539839 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Mon Mar 14 17:21:52 2011 +0000
+++ b/xen/include/asm-x86/mm.h  Tue Mar 15 13:57:11 2011 +0000
@@ -301,8 +301,6 @@
                    int preemptible);
 int _shadow_mode_refcounts(struct domain *d);
 
-void cleanup_page_cacheattr(struct page_info *page);
-
 int is_iomem_page(unsigned long mfn);
 
 struct domain *page_get_owner_and_reference(struct page_info *page);
