[Xen-changelog] [xen master] x86/shadow: Alter sh_get_ref() and sh_{,un}pin() to take a domain



commit 63a8f0c18a1456f170a90ed33e7bf30f48b6f624
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Aug 11 01:56:52 2014 +0100
Commit:     Tim Deegan <tim@xxxxxxx>
CommitDate: Fri Feb 20 14:47:39 2015 +0000

    x86/shadow: Alter sh_get_ref() and sh_{,un}pin() to take a domain
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  |    6 +++---
 xen/arch/x86/mm/shadow/multi.c   |   16 ++++++++--------
 xen/arch/x86/mm/shadow/private.h |   11 ++++-------
 3 files changed, 15 insertions(+), 18 deletions(-)
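
The pattern is identical across all three files: each helper's only use of
its struct vcpu argument was the "struct domain *d = v->domain;" lookup
deleted from private.h below, so the patch pushes that lookup out to the
call sites, which already have d in scope.  A minimal, compilable sketch
of the interface change (the types, fields and function bodies here are
hypothetical stand-ins, not Xen's definitions):

    #include <stdio.h>

    /* Stand-in structures; Xen's real struct domain/vcpu are far larger. */
    struct domain { unsigned int shadow_refs; };
    struct vcpu   { struct domain *domain; };

    /* Before: the vcpu parameter existed only to reach the domain. */
    static int get_ref_via_vcpu(struct vcpu *v)
    {
        struct domain *d = v->domain;   /* sole use of v */
        return ++d->shadow_refs;
    }

    /* After: callers pass the domain directly, documenting that shadow
     * refcounting and pinning are per-domain, not per-vcpu, state. */
    static int get_ref_via_domain(struct domain *d)
    {
        return ++d->shadow_refs;
    }

    int main(void)
    {
        struct domain dom = { 0 };
        struct vcpu v = { &dom };

        get_ref_via_vcpu(&v);         /* old calling convention */
        get_ref_via_domain(v.domain); /* new: caller hoists v->domain */
        printf("refs = %u\n", dom.shadow_refs);
        return 0;
    }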

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index e2ea6cb..046201a 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1305,7 +1305,7 @@ static void _shadow_prealloc(
 
         /* Unpin this top-level shadow */
         trace_shadow_prealloc_unpin(d, smfn);
-        sh_unpin(v, smfn);
+        sh_unpin(d, smfn);
 
         /* See if that freed up enough space */
         if ( d->arch.paging.shadow.free_pages >= pages ) return;
@@ -1370,7 +1370,7 @@ static void shadow_blow_tables(struct domain *d)
     foreach_pinned_shadow(d, sp, t)
     {
         smfn = page_to_mfn(sp);
-        sh_unpin(v, smfn);
+        sh_unpin(d, smfn);
     }
 
     /* Second pass: unhook entries of in-use shadows */
@@ -2616,7 +2616,7 @@ void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
         break;                                                          \
     }                                                                   \
     if ( sh_type_is_pinnable(d, t) )                                    \
-        sh_unpin(v, smfn);                                              \
+        sh_unpin(d, smfn);                                              \
     else if ( sh_type_has_up_pointer(d, t) )                            \
         sh_remove_shadow_via_pointer(v, smfn);                          \
     if( !fast                                                           \
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7d82d90..ccb08d3 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -906,10 +906,10 @@ static int shadow_set_l4e(struct vcpu *v,
     {
         /* About to install a new reference */
         mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
-        ok = sh_get_ref(v, sl3mfn, paddr);
+        ok = sh_get_ref(d, sl3mfn, paddr);
         /* Are we pinning l3 shadows to handle weird linux behaviour? */
         if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-            ok |= sh_pin(v, sl3mfn);
+            ok |= sh_pin(d, sl3mfn);
         if ( !ok )
         {
             domain_crash(d);
@@ -956,7 +956,7 @@ static int shadow_set_l3e(struct vcpu *v,
     if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
     {
         /* About to install a new reference */
-        if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
+        if ( !sh_get_ref(d, shadow_l3e_get_mfn(new_sl3e), paddr) )
         {
             domain_crash(d);
             return SHADOW_SET_ERROR;
@@ -1018,7 +1018,7 @@ static int shadow_set_l2e(struct vcpu *v,
         ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
 
         /* About to install a new reference */
-        if ( !sh_get_ref(v, sl1mfn, paddr) )
+        if ( !sh_get_ref(d, sl1mfn, paddr) )
         {
             domain_crash(d);
             return SHADOW_SET_ERROR;
@@ -1537,7 +1537,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
             page_list_for_each_safe(sp, t, &d->arch.paging.shadow.pinned_shadows)
             {
                 if ( sp->u.sh.type == SH_type_l3_64_shadow )
-                    sh_unpin(v, page_to_mfn(sp));
+                    sh_unpin(d, page_to_mfn(sp));
             }
             d->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
             sh_reset_l3_up_pointers(v);
@@ -3866,7 +3866,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
     ASSERT(mfn_valid(smfn));
 
     /* Pin the shadow and put it (back) on the list of pinned shadows */
-    if ( sh_pin(v, smfn) == 0 )
+    if ( sh_pin(d, smfn) == 0 )
     {
         SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
         domain_crash(d);
@@ -3874,7 +3874,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
 
     /* Take a ref to this page: it will be released in sh_detach_old_tables()
      * or the next call to set_toplevel_shadow() */
-    if ( !sh_get_ref(v, smfn, 0) )
+    if ( !sh_get_ref(d, smfn, 0) )
     {
         SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
         domain_crash(d);
@@ -3895,7 +3895,7 @@ sh_set_toplevel_shadow(struct vcpu *v,
         /* Need to repin the old toplevel shadow if it's been unpinned
          * by shadow_prealloc(): in PV mode we're still running on this
          * shadow and it's not safe to free it yet. */
-        if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(v, old_smfn) )
+        if ( !mfn_to_page(old_smfn)->u.sh.pinned && !sh_pin(d, old_smfn) )
         {
             SHADOW_ERROR("can't re-pin %#lx\n", mfn_x(old_smfn));
             domain_crash(d);
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index a848c94..cddfde6 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -538,9 +538,8 @@ void sh_destroy_shadow(struct domain *d, mfn_t smfn);
  * and the physical address of the shadow entry that holds the ref (or zero
  * if the ref is held by something else).
  * Returns 0 for failure, 1 for success. */
-static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
+static inline int sh_get_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
 {
-    struct domain *d = v->domain;
     u32 x, nx;
     struct page_info *sp = mfn_to_page(smfn);
 
@@ -645,9 +644,8 @@ prev_pinned_shadow(const struct page_info *page,
 /* Pin a shadow page: take an extra refcount, set the pin bit,
  * and put the shadow at the head of the list of pinned shadows.
  * Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct vcpu *v, mfn_t smfn)
+static inline int sh_pin(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     struct page_info *sp[4];
     struct page_list_head *pin_list;
     unsigned int i, pages;
@@ -681,7 +679,7 @@ static inline int sh_pin(struct vcpu *v, mfn_t smfn)
     else
     {
         /* Not pinned: pin it! */
-        if ( !sh_get_ref(v, smfn, 0) )
+        if ( !sh_get_ref(d, smfn, 0) )
             return 0;
         sp[0]->u.sh.pinned = 1;
     }
@@ -695,9 +693,8 @@ static inline int sh_pin(struct vcpu *v, mfn_t smfn)
 
 /* Unpin a shadow page: unset the pin bit, take the shadow off the list
  * of pinned shadows, and release the extra ref. */
-static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
+static inline void sh_unpin(struct domain *d, mfn_t smfn)
 {
-    struct domain *d = v->domain;
     struct page_list_head tmp_list, *pin_list;
     struct page_info *sp, *next;
     unsigned int i, head_type;
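
The comments kept in the private.h hunks above describe the scheme these
helpers implement: sh_get_ref() takes a reference on a shadow page,
sh_pin() takes an extra reference, sets the pin bit and puts the shadow at
the head of the per-domain list of pinned shadows, and sh_unpin() reverses
all of that.  A rough, self-contained sketch of that scheme (simplified:
the names and types are stand-ins, and Xen's real code additionally
handles multi-page shadows, uses intrusive page lists, and moves an
already-pinned shadow back to the head of the list):

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for Xen's page_info and pinned-shadow list. */
    struct shadow_page {
        unsigned int refs;
        bool pinned;
        struct shadow_page *next;
    };

    struct shadow_domain {
        struct shadow_page *pinned_head;
    };

    /* Take a reference; report failure instead of overflowing. */
    static bool get_ref(struct shadow_page *sp)
    {
        if ( sp->refs == ~0u )
            return false;
        sp->refs++;
        return true;
    }

    /* Pin: extra ref, pin bit, head of the pinned list. */
    static bool pin(struct shadow_domain *d, struct shadow_page *sp)
    {
        if ( sp->pinned )
            return true;             /* simplified: no move-to-head */
        if ( !get_ref(sp) )
            return false;
        sp->pinned = true;
        sp->next = d->pinned_head;
        d->pinned_head = sp;
        return true;
    }

    /* Unpin: clear the pin bit, unlink, drop the pin-time reference. */
    static void unpin(struct shadow_domain *d, struct shadow_page *sp)
    {
        struct shadow_page **pp;

        if ( !sp->pinned )
            return;
        for ( pp = &d->pinned_head; *pp; pp = &(*pp)->next )
            if ( *pp == sp )
            {
                *pp = sp->next;
                break;
            }
        sp->pinned = false;
        sp->refs--;
    }

    int main(void)
    {
        struct shadow_domain d = { NULL };
        struct shadow_page sp = { 0, false, NULL };

        pin(&d, &sp);     /* refs 0 -> 1, sp on the pinned list */
        unpin(&d, &sp);   /* refs 1 -> 0, sp off the list again */
        return 0;
    }

Dropping that pin-time reference is what lets _shadow_prealloc() free
pinned top-level shadows in common.c, and is also why
sh_set_toplevel_shadow() in multi.c must re-pin the old toplevel shadow it
is still running on in PV mode before it is safe to continue.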
--
generated by git-patchbot for /home/xen/git/xen.git#master
