
[Xen-devel] [PATCH v2 4/4] x86/shadow: Switch to using bool

 * sh_pin() has boolean properties, so switch its return type to bool
   (see the sketch below).
 * sh_remove_shadows() uses ints everywhere other than its stub, so switch
   the stub to match.
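
As a hedged illustration only (this caller is hypothetical, loosely modeled
on how existing code such as sh_set_toplevel_shadow() consumes the result;
it is not part of this patch), a bool return lets failure read naturally at
the call site:

    if ( !sh_pin(d, smfn) )
    {
        /* Pinning failed: the extra reference could not be taken. */
        SHADOW_ERROR("can't pin %#lx as toplevel shadow\n", mfn_x(smfn));
        domain_crash(d);
    }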

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Tim Deegan <tim@xxxxxxx>

v2:
 * Switch sh_remove_shadows() back to using ints.
 * Fix more comments and prototypes.
---
 xen/arch/x86/mm/shadow/common.c  |  4 ++--
 xen/arch/x86/mm/shadow/multi.c   | 42 ++++++++++++++++++++--------------------
 xen/arch/x86/mm/shadow/none.c    |  6 +++---
 xen/arch/x86/mm/shadow/private.h | 16 ++++++++-------
 xen/include/asm-x86/paging.h     | 33 +++++++++++++++++--------------
 xen/include/asm-x86/shadow.h     |  2 +-
 6 files changed, 54 insertions(+), 49 deletions(-)
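
For context, the calling convention described in the cmpxchg hook's comment
below (the return value reports whether we page-faulted, while the caller
inspects "old" to see whether the exchange took effect) might be consumed
like this minimal, hypothetical sketch (not part of the patch):

    static bool example_update(struct vcpu *v, intpte_t *p, mfn_t gmfn,
                               intpte_t expected, intpte_t new)
    {
        intpte_t old = expected;

        if ( !paging_cmpxchg_guest_entry(v, p, &old, new, gmfn) )
            return false;          /* Page-faulted: bail out. */

        return old == expected;    /* true <=> the cmpxchg took effect. */
    }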

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e64a77..36f5746 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -40,7 +40,7 @@
 
 DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
 
-static int sh_enable_log_dirty(struct domain *, bool_t log_global);
+static int sh_enable_log_dirty(struct domain *, bool log_global);
 static int sh_disable_log_dirty(struct domain *);
 static void sh_clean_dirty_bitmap(struct domain *);
 
@@ -3553,7 +3553,7 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
 /* Shadow specific code which is called in paging_log_dirty_enable().
  * Return 0 if no problem found.
  */
-static int sh_enable_log_dirty(struct domain *d, bool_t log_global)
+static int sh_enable_log_dirty(struct domain *d, bool log_global)
 {
     int ret;
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f65ffc6..c9c2252 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -367,9 +367,9 @@ static void sh_audit_gw(struct vcpu *v, const walk_t *gw)
 
 /*
  * Write a new value into the guest pagetable, and update the shadows
- * appropriately.  Returns 0 if we page-faulted, 1 for success.
+ * appropriately.  Returns false if we page-faulted, true for success.
  */
-static bool_t
+static bool
 sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 {
 #if CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS
@@ -383,17 +383,17 @@ sh_write_guest_entry(struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 
     return !failed;
 #else
-    return 0;
+    return false;
 #endif
 }
 
 /*
  * Cmpxchg a new value into the guest pagetable, and update the shadows
- * appropriately. Returns 0 if we page-faulted, 1 if not.
+ * appropriately. Returns false if we page-faulted, true if not.
  * N.B. caller should check the value of "old" to see if the cmpxchg itself
  * was successful.
  */
-static bool_t
+static bool
 sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
                        intpte_t new, mfn_t gmfn)
 {
@@ -410,7 +410,7 @@ sh_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p, intpte_t *old,
 
     return !failed;
 #else
-    return 0;
+    return false;
 #endif
 }
 
@@ -530,7 +530,7 @@ _sh_propagate(struct vcpu *v,
     gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
     u32 pass_thru_flags;
     u32 gflags, sflags;
-    bool_t mmio_mfn;
+    bool mmio_mfn;
 
     /* We don't shadow PAE l3s */
     ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
@@ -3616,11 +3616,11 @@ static int sh_page_fault(struct vcpu *v,
 
 
 /*
- * Called when the guest requests an invlpg.  Returns 1 if the invlpg
- * instruction should be issued on the hardware, or 0 if it's safe not
+ * Called when the guest requests an invlpg.  Returns true if the invlpg
+ * instruction should be issued on the hardware, or false if it's safe not
  * to do so.
  */
-static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
+static bool sh_invlpg(struct vcpu *v, unsigned long va)
 {
     mfn_t sl1mfn;
     shadow_l2e_t sl2e;
@@ -3645,7 +3645,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
         if ( !(shadow_l4e_get_flags(
                    sh_linear_l4_table(v)[shadow_l4_linear_offset(va)])
                & _PAGE_PRESENT) )
-            return 0;
+            return false;
         /* This must still be a copy-from-user because we don't have the
          * paging lock, and the higher-level shadows might disappear
          * under our feet. */
@@ -3654,16 +3654,16 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
                               sizeof (sl3e)) != 0 )
         {
             perfc_incr(shadow_invlpg_fault);
-            return 0;
+            return false;
         }
         if ( !(shadow_l3e_get_flags(sl3e) & _PAGE_PRESENT) )
-            return 0;
+            return false;
     }
 #else /* SHADOW_PAGING_LEVELS == 3 */
     if ( !(l3e_get_flags(v->arch.paging.shadow.l3table[shadow_l3_linear_offset(va)])
            & _PAGE_PRESENT) )
         // no need to flush anything if there's no SL2...
-        return 0;
+        return false;
 #endif
 
     /* This must still be a copy-from-user because we don't have the shadow
@@ -3673,14 +3673,14 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
                           sizeof (sl2e)) != 0 )
     {
         perfc_incr(shadow_invlpg_fault);
-        return 0;
+        return false;
     }
 
     // If there's nothing shadowed for this particular sl2e, then
     // there is no need to do an invlpg, either...
     //
     if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
-        return 0;
+        return false;
 
     // Check to see if the SL2 is a splintered superpage...
     // If so, then we'll need to flush the entire TLB (because that's
@@ -3691,7 +3691,7 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
          == SH_type_fl1_shadow )
     {
         flush_tlb_local();
-        return 0;
+        return false;
     }
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
@@ -3718,13 +3718,13 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
             {
                 perfc_incr(shadow_invlpg_fault);
                 paging_unlock(d);
-                return 0;
+                return false;
             }
 
             if ( !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT) )
             {
                 paging_unlock(d);
-                return 0;
+                return false;
             }
 
             sl1mfn = shadow_l2e_get_mfn(sl2e);
@@ -3742,12 +3742,12 @@ static bool_t sh_invlpg(struct vcpu *v, unsigned long va)
             }
             paging_unlock(d);
             /* Need the invlpg, to pick up the disappeareance of the sl1e */
-            return 1;
+            return true;
         }
     }
 #endif
 
-    return 1;
+    return true;
 }
 
 
diff --git a/xen/arch/x86/mm/shadow/none.c b/xen/arch/x86/mm/shadow/none.c
index 41ce593..9e6ad23 100644
--- a/xen/arch/x86/mm/shadow/none.c
+++ b/xen/arch/x86/mm/shadow/none.c
@@ -1,7 +1,7 @@
 #include <xen/mm.h>
 #include <asm/shadow.h>
 
-static int _enable_log_dirty(struct domain *d, bool_t log_global)
+static int _enable_log_dirty(struct domain *d, bool log_global)
 {
     ASSERT(is_pv_domain(d));
     return -EOPNOTSUPP;
@@ -37,10 +37,10 @@ static int _page_fault(struct vcpu *v, unsigned long va,
     return 0;
 }
 
-static bool_t _invlpg(struct vcpu *v, unsigned long va)
+static bool _invlpg(struct vcpu *v, unsigned long va)
 {
     ASSERT_UNREACHABLE();
-    return 1;
+    return true;
 }
 
 static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 472676c..46d9bab 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -622,15 +622,17 @@ prev_pinned_shadow(struct page_info *page,
           pos ? (tmp = prev_pinned_shadow(pos, (dom)), 1) : 0;  \
           pos = tmp )
 
-/* Pin a shadow page: take an extra refcount, set the pin bit,
+/*
+ * Pin a shadow page: take an extra refcount, set the pin bit,
  * and put the shadow at the head of the list of pinned shadows.
- * Returns 0 for failure, 1 for success. */
-static inline int sh_pin(struct domain *d, mfn_t smfn)
+ * Returns false for failure, true for success.
+ */
+static inline bool sh_pin(struct domain *d, mfn_t smfn)
 {
     struct page_info *sp[4];
     struct page_list_head *pin_list;
     unsigned int i, pages;
-    bool_t already_pinned;
+    bool already_pinned;
 
     ASSERT(mfn_valid(smfn));
     sp[0] = mfn_to_page(smfn);
@@ -641,7 +643,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
 
     pin_list = &d->arch.paging.shadow.pinned_shadows;
     if ( already_pinned && sp[0] == page_list_first(pin_list) )
-        return 1;
+        return true;
 
     /* Treat the up-to-four pages of the shadow as a unit in the list ops */
     for ( i = 1; i < pages; i++ )
@@ -661,7 +663,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
     {
         /* Not pinned: pin it! */
         if ( !sh_get_ref(d, smfn, 0) )
-            return 0;
+            return false;
         sp[0]->u.sh.pinned = 1;
     }
 
@@ -669,7 +671,7 @@ static inline int sh_pin(struct domain *d, mfn_t smfn)
     for ( i = pages; i > 0; i-- )
         page_list_add(sp[i - 1], pin_list);
 
-    return 1;
+    return true;
 }
 
 /* Unpin a shadow page: unset the pin bit, take the shadow off the list
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index f262c9e..44e86d6 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -93,9 +93,9 @@ struct shadow_paging_mode {
                                             unsigned long new,
                                             unsigned int bytes,
                                             struct sh_emulate_ctxt *sh_ctxt);
-    bool_t        (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
+    bool          (*write_guest_entry     )(struct vcpu *v, intpte_t *p,
                                             intpte_t new, mfn_t gmfn);
-    bool_t        (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
+    bool          (*cmpxchg_guest_entry   )(struct vcpu *v, intpte_t *p,
                                             intpte_t *old, intpte_t new,
                                             mfn_t gmfn);
     mfn_t         (*make_monitor_table    )(struct vcpu *v);
@@ -115,7 +115,7 @@ struct shadow_paging_mode {
 struct paging_mode {
     int           (*page_fault            )(struct vcpu *v, unsigned long va,
                                             struct cpu_user_regs *regs);
-    bool_t        (*invlpg                )(struct vcpu *v, unsigned long va);
+    bool          (*invlpg                )(struct vcpu *v, unsigned long va);
     unsigned long (*gva_to_gfn            )(struct vcpu *v,
                                             struct p2m_domain *p2m,
                                             unsigned long va,
@@ -292,11 +292,13 @@ static inline void paging_update_paging_modes(struct vcpu *v)
 }
 
 
-/* Write a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns 0 if we page-faulted,
- * 1 for success. */
-static inline bool_t paging_write_guest_entry(struct vcpu *v, intpte_t *p,
-                                              intpte_t new, mfn_t gmfn)
+/*
+ * Write a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately.  Returns false if we page-faulted,
+ * true for success.
+ */
+static inline bool paging_write_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t new, mfn_t gmfn)
 {
 #ifdef CONFIG_SHADOW_PAGING
     if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
@@ -307,13 +309,14 @@ static inline bool_t paging_write_guest_entry(struct vcpu *v, intpte_t *p,
 }
 
 
-/* Cmpxchg a new value into the guest pagetable, and update the
- * paging-assistance state appropriately.  Returns 0 if we page-faulted,
- * 1 if not.  N.B. caller should check the value of "old" to see if the
- * cmpxchg itself was successful. */
-static inline bool_t paging_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
-                                                intpte_t *old, intpte_t new,
-                                                mfn_t gmfn)
+/*
+ * Cmpxchg a new value into the guest pagetable, and update the
+ * paging-assistance state appropriately.  Returns false if we page-faulted,
+ * true if not.  N.B. caller should check the value of "old" to see if the
+ * cmpxchg itself was successful.
+ */
+static inline bool paging_cmpxchg_guest_entry(
+    struct vcpu *v, intpte_t *p, intpte_t *old, intpte_t new, mfn_t gmfn)
 {
 #ifdef CONFIG_SHADOW_PAGING
     if ( unlikely(paging_mode_shadow(v->domain)) && paging_get_hostmode(v) )
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 7e1ed3b..678b5d4 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -102,7 +102,7 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
     ({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
 
 static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
-                                     bool_t fast, bool_t all) {}
+                                     int fast, int all) {}
 
 static inline void shadow_blow_tables_per_domain(struct domain *d) {}
 
-- 
2.1.4

