
[Xen-changelog] [xen master] x86/P2M: pass on errors from p2m_set_entry()



commit 98b8e967d030b1a5a1ecbbfced01abfd773a5fc0
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu May 8 13:58:46 2014 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu May 8 13:58:46 2014 +0200

    x86/P2M: pass on errors from p2m_set_entry()
    
    ... at least in a couple of straightforward cases.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
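
    The change follows the common C idiom of turning a void helper into
    one that returns an error code and simply forwarding the result of
    the innermost call that can fail.  A minimal standalone sketch of the
    idiom, with purely illustrative names (set_entry/remove_page are
    hypothetical stand-ins, not the real p2m interfaces):

        #include <errno.h>
        #include <stdio.h>

        /* Stand-in for p2m_set_entry(): 0 on success, -errno on failure. */
        static int set_entry(unsigned long gfn, unsigned long mfn)
        {
            (void)mfn;
            return (gfn > 0xffff) ? -EINVAL : 0;  /* fail on out-of-range gfn */
        }

        /* Pre-patch shape: "set_entry(...); return;" -- errors were lost.
         * Post-patch shape: forward the result to the caller.            */
        static int remove_page(unsigned long gfn)
        {
            /* ... validation and teardown work elided ... */
            return set_entry(gfn, 0);
        }

        int main(void)
        {
            printf("remove_page(42)      -> %d\n", remove_page(42));
            printf("remove_page(1 << 20) -> %d\n", remove_page(1UL << 20));
            return 0;
        }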
---
 xen/arch/x86/mm/p2m.c |   24 ++++++++++--------------
 1 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 03e9ad5..afff8db 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -501,7 +501,7 @@ void p2m_final_teardown(struct domain *d)
 }
 
 
-static void
+static int
 p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
                 unsigned int page_order)
 {
@@ -515,7 +515,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
         if ( need_iommu(p2m->domain) )
             for ( i = 0; i < (1 << page_order); i++ )
                 iommu_unmap_page(p2m->domain, mfn + i);
-        return;
+        return 0;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -531,8 +531,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
-    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
-                  p2m->default_access);
+    return p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+                         p2m->default_access);
 }
 
 void
@@ -957,8 +957,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
         goto out;
 
     /* Fix p2m entry */
-    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
-    ret = 0;
+    ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
 
  out:
     gfn_unlock(p2m, gfn, 0);
@@ -1022,7 +1021,8 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
         put_page(page);
 
     /* Remove mapping from p2m table */
-    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
+    ret = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+                        p2m_ram_paged, a);
 
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);
@@ -1030,8 +1030,6 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
     /* Track number of paged gfns */
     atomic_inc(&d->paged_pages);
 
-    ret = 0;
-
  out_put:
     /* Put the page back so it gets freed */
     put_page(page);
@@ -1231,16 +1229,14 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
     /* Make the page already guest-accessible. If the pager still has a
      * pending resume operation, it will be idempotent p2m entry-wise,
      * but will unpause the vcpu */
-    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
-                  paging_mode_log_dirty(d) ? p2m_ram_logdirty :
-                  p2m_ram_rw, a);
+    ret = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                        paging_mode_log_dirty(d) ? p2m_ram_logdirty
+                                                 : p2m_ram_rw, a);
     set_gpfn_from_mfn(mfn_x(mfn), gfn);
 
     if ( !page_extant )
         atomic_dec(&d->paged_pages);
 
-    ret = 0;
-
  out:
     gfn_unlock(p2m, gfn, 0);
     return ret;
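
The paging-path hunks also show the kernel-style lock/goto-out idiom: on
success, ret is now assigned from p2m_set_entry() instead of a blanket
"ret = 0", and the shared out label drops the gfn lock on every path.  A
standalone sketch of that idiom, again with hypothetical names
(gfn_lock/gfn_unlock/set_entry/nominate are stand-ins, not Xen code):

    #include <errno.h>
    #include <stdio.h>

    static void gfn_lock(void)   { /* take the per-gfn lock */ }
    static void gfn_unlock(void) { /* drop the per-gfn lock */ }

    /* Stand-in for p2m_set_entry(): 0 on success, -errno on failure. */
    static int set_entry(long gfn)
    {
        return (gfn >= 0) ? 0 : -EINVAL;
    }

    static int nominate(long gfn)
    {
        int ret = -EBUSY;        /* seeded error for the early-exit path */

        gfn_lock();

        if ( gfn == 0 )
            goto out;            /* early failure keeps the seeded error */

        /* Pre-patch: "set_entry(gfn); ret = 0;" hid any failure.
         * Post-patch: the last operation's result is the function's. */
        ret = set_entry(gfn);

     out:
        gfn_unlock();            /* single unlock point for every path */
        return ret;
    }

    int main(void)
    {
        printf("nominate(5)  -> %d\n", nominate(5));
        printf("nominate(0)  -> %d\n", nominate(0));
        printf("nominate(-1) -> %d\n", nominate(-1));
        return 0;
    }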
--
generated by git-patchbot for /home/xen/git/xen.git#master
