
[Xen-devel] [PATCH 07/14] x86/p2m: Coding style cleanup



Drop trailing whitespace, add the spaces Xen style requires inside
if ( ... ) conditions, and reflow multi-line comment blocks into their
standard form (fixing a "hasolds" typo in one of them).

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
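
For reviewers cross-checking against xen/CODING_STYLE: the changes are
mechanical applications of three rules: no trailing whitespace, spaces
immediately inside the parentheses of conditionals, and multi-line
comments opening and closing on their own lines.  A minimal sketch of
the target style (illustrative only; the function below is made up, not
code from this patch):

    /*
     * Multi-line comments put the opening and closing markers on
     * their own lines, with the text aligned between them.
     */
    static void example(const struct p2m_domain *p2m)
    {
        if ( p2m == NULL )   /* Spaces just inside the parentheses. */
            return;
    }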
---
 xen/arch/x86/mm/p2m.c     | 29 ++++++++-------
 xen/include/asm-x86/p2m.h | 91 ++++++++++++++++++++++++++---------------------
 2 files changed, 65 insertions(+), 55 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 8b9898a..f52a71e 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -444,7 +444,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
     }
 
-    if (unlikely((p2m_is_broken(*t))))
+    if ( unlikely(p2m_is_broken(*t)) )
     {
         /* Return invalid_mfn to avoid caller's access */
         mfn = INVALID_MFN;
@@ -669,7 +669,7 @@ void p2m_teardown(struct p2m_domain *p2m)
     struct page_info *pg;
     struct domain *d;
 
-    if (p2m == NULL)
+    if ( p2m == NULL )
         return;
 
     d = p2m->domain;
@@ -776,7 +776,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                               &a, 0, NULL, NULL);
         if ( p2m_is_shared(ot) )
         {
-            /* Do an unshare to cleanly take care of all corner 
+            /* Do an unshare to cleanly take care of all corner
              * cases. */
             int rc;
             rc = mem_sharing_unshare_page(p2m->domain,
@@ -793,7 +793,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                  * However, all current (changeset 3432abcf9380) code
                  * paths avoid this unsavoury situation. For now.
                  *
-                 * Foreign domains are okay to place an event as they 
+                 * Foreign domains are okay to place an event as they
                  * won't go to sleep. */
                 (void)mem_sharing_notify_enomem(p2m->domain,
                                                 gfn_x(gfn_add(gfn, i)), false);
@@ -808,7 +808,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
             /* Really shouldn't be unmapping grant/foreign maps this way */
             domain_crash(d);
             p2m_unlock(p2m);
-            
+
             return -EINVAL;
         }
         else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
@@ -934,7 +934,7 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn_l,
 }
 
 /* Modify the p2m type of a range of gfns from ot to nt. */
-void p2m_change_type_range(struct domain *d, 
+void p2m_change_type_range(struct domain *d,
                            unsigned long start, unsigned long end,
                            p2m_type_t ot, p2m_type_t nt)
 {
@@ -1568,7 +1568,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer)
 
     if ( user_ptr )
         /* Sanity check the buffer and bail out early if trouble */
-        if ( (buffer & (PAGE_SIZE - 1)) || 
+        if ( (buffer & (PAGE_SIZE - 1)) ||
              (!access_ok(user_ptr, PAGE_SIZE)) )
             return -EINVAL;
 
@@ -1613,7 +1613,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn_l, uint64_t buffer)
                                  "bytes left %d\n", gfn_l, d->domain_id, rc);
             ret = -EFAULT;
             put_page(page); /* Don't leak pages */
-            goto out;            
+            goto out;
         }
     }
 
@@ -1685,7 +1685,7 @@ static struct p2m_domain *
 p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
 {
     struct list_head *lru_list = &p2m_get_hostp2m(d)->np2m_list;
-    
+
     ASSERT(!list_empty(lru_list));
 
     if ( p2m == NULL )
@@ -1825,13 +1825,12 @@ p2m_get_nestedp2m_locked(struct vcpu *v)
     /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
     np2m_base &= ~(0xfffull);
 
-    if (nv->nv_flushp2m && nv->nv_p2m) {
+    if ( nv->nv_flushp2m && nv->nv_p2m )
         nv->nv_p2m = NULL;
-    }
 
     nestedp2m_lock(d);
     p2m = nv->nv_p2m;
-    if ( p2m ) 
+    if ( p2m )
     {
         p2m_lock(p2m);
         if ( p2m->np2m_base == np2m_base )
@@ -1889,7 +1888,7 @@ struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v)
 struct p2m_domain *
 p2m_get_p2m(struct vcpu *v)
 {
-    if (!nestedhvm_is_n2(v))
+    if ( !nestedhvm_is_n2(v) )
         return p2m_get_hostp2m(v->domain);
 
     return p2m_get_nestedp2m(v);
@@ -2554,7 +2553,7 @@ void audit_p2m(struct domain *d,
     p2m_lock(p2m);
     pod_lock(p2m);
 
-    if (p2m->audit_p2m)
+    if ( p2m->audit_p2m )
         pmbad = p2m->audit_p2m(p2m);
 
     /* Audit part two: walk the domain's page allocation list, checking
@@ -2615,7 +2614,7 @@ void audit_p2m(struct domain *d,
 
     pod_unlock(p2m);
     p2m_unlock(p2m);
- 
+
     P2M_PRINTK("p2m audit complete\n");
     if ( orphans_count | mpbad | pmbad )
         P2M_PRINTK("p2m audit found %lu orphans\n", orphans_count);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 04c2104..74d0cf6 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -34,9 +34,9 @@
 extern bool_t opt_hap_1gb, opt_hap_2mb;
 
 /*
- * The upper levels of the p2m pagetable always contain full rights; all 
+ * The upper levels of the p2m pagetable always contain full rights; all
  * variation in the access control bits is made in the level-1 PTEs.
- * 
+ *
  * In addition to the phys-to-machine translation, each p2m PTE contains
  * *type* information about the gfn it translates, helping Xen to decide
  * on the correct course of action when handling a page-fault to that
@@ -47,8 +47,8 @@ extern bool_t opt_hap_1gb, opt_hap_2mb;
  */
 
 /*
- * AMD IOMMU: When we share p2m table with iommu, bit 52 -bit 58 in pte 
- * cannot be non-zero, otherwise, hardware generates io page faults when 
+ * AMD IOMMU: When we share p2m table with iommu, bit 52 -bit 58 in pte
+ * cannot be non-zero, otherwise, hardware generates io page faults when
  * device access those pages. Therefore, p2m_ram_rw has to be defined as 0.
  */
 typedef enum {
@@ -212,9 +212,11 @@ struct p2m_domain {
     uint64_t           np2m_base;
     uint64_t           np2m_generation;
 
-    /* Nested p2ms: linked list of n2pms allocated to this domain. 
-     * The host p2m hasolds the head of the list and the np2ms are 
-     * threaded on in LRU order. */
+    /*
+     * Nested p2ms: linked list of n2pms allocated to this domain.
+     * The host p2m holds the head of the list and the np2ms are
+     * threaded on in LRU order.
+     */
     struct list_head   np2m_list;
 #endif
 
@@ -224,10 +226,12 @@ struct p2m_domain {
     /* Host p2m: Global log-dirty mode enabled for the domain. */
     bool_t             global_logdirty;
 
-    /* Host p2m: when this flag is set, don't flush all the nested-p2m 
-     * tables on every host-p2m change.  The setter of this flag 
+    /*
+     * Host p2m: when this flag is set, don't flush all the nested-p2m
+     * tables on every host-p2m change.  The setter of this flag
      * is responsible for performing the full flush before releasing the
-     * host p2m's lock. */
+     * host p2m's lock.
+     */
     int                defer_nested_flush;
 
 #ifdef CONFIG_HVM
@@ -264,7 +268,7 @@ struct p2m_domain {
                                                   unsigned long first_gfn,
                                                   unsigned long last_gfn);
     void               (*memory_type_changed)(struct p2m_domain *p2m);
-    
+
     void               (*write_p2m_entry)(struct p2m_domain *p2m,
                                           unsigned long gfn, l1_pgentry_t *p,
                                          l1_pgentry_t new, unsigned int level);
@@ -291,8 +295,10 @@ struct p2m_domain {
      * retyped get this access type.  See definition of p2m_access_t. */
     p2m_access_t default_access;
 
-    /* If true, and an access fault comes in and there is no vm_event listener,
-     * pause domain.  Otherwise, remove access restrictions. */
+    /*
+     * If true, and an access fault comes in and there is no vm_event listener,
+     * pause domain.  Otherwise, remove access restrictions.
+     */
     bool_t       access_required;
 
     /* Highest guest frame that's ever been mapped in the p2m */
@@ -310,13 +316,15 @@ struct p2m_domain {
     unsigned long next_shared_gfn_to_relinquish;
 
 #ifdef CONFIG_HVM
-    /* Populate-on-demand variables
+    /*
+     * Populate-on-demand variables
      * All variables are protected with the pod lock. We cannot rely on
      * the p2m lock if it's turned into a fine-grained lock.
-     * We only use the domain page_alloc lock for additions and 
+     * We only use the domain page_alloc lock for additions and
      * deletions to the domain's page list. Because we use it nested
      * within the PoD lock, we enforce it's ordering (by remembering
-     * the unlock level in the arch_domain sub struct). */
+     * the unlock level in the arch_domain sub struct).
+     */
     struct {
         struct page_list_head super,   /* List of superpages                */
                          single;       /* Non-super lists                   */
@@ -426,13 +434,15 @@ mfn_t __nonnull(3, 4) __get_gfn_type_access(
     struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
     p2m_access_t *a, p2m_query_t q, unsigned int *page_order, bool_t locked);
 
-/* Read a particular P2M table, mapping pages as we go.  Most callers
+/*
+ * Read a particular P2M table, mapping pages as we go.  Most callers
  * should _not_ call this directly; use the other get_gfn* functions
  * below unless you know you want to walk a p2m that isn't a domain's
  * main one.
- * If the lookup succeeds, the return value is != INVALID_MFN and 
+ * If the lookup succeeds, the return value is != INVALID_MFN and
  * *page_order is filled in with the order of the superpage (if any) that
- * the entry was found in.  */
+ * the entry was found in.
+ */
 static inline mfn_t __nonnull(3, 4) get_gfn_type_access(
     struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t,
     p2m_access_t *a, p2m_query_t q, unsigned int *page_order)
@@ -459,10 +469,11 @@ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
 
 #define put_gfn(d, gfn) __put_gfn(p2m_get_hostp2m((d)), (gfn))
 
-/* The intent of the "unlocked" accessor is to have the caller not worry about
- * put_gfn. They apply to very specific situations: debug printk's, dumps 
- * during a domain crash, or to peek at a p2m entry/type. Caller is not 
- * holding the p2m entry exclusively during or after calling this. 
+/*
+ * The intent of the "unlocked" accessor is to have the caller not worry about
+ * put_gfn. They apply to very specific situations: debug printk's, dumps
+ * during a domain crash, or to peek at a p2m entry/type. Caller is not
+ * holding the p2m entry exclusively during or after calling this.
  *
  * This is also used in the shadow code whenever the paging lock is
  * held -- in those cases, the caller is protected against concurrent
@@ -473,19 +484,21 @@ void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
  * Any other type of query can cause a change in the p2m and may need to
  * perform locking.
  */
-static inline mfn_t get_gfn_query_unlocked(struct domain *d, 
-                                           unsigned long gfn, 
+static inline mfn_t get_gfn_query_unlocked(struct domain *d,
+                                           unsigned long gfn,
                                            p2m_type_t *t)
 {
     p2m_access_t a;
     return __get_gfn_type_access(p2m_get_hostp2m(d), gfn, t, &a, 0, NULL, 0);
 }
 
-/* Atomically look up a GFN and take a reference count on the backing page.
+/*
+ * Atomically look up a GFN and take a reference count on the backing page.
  * This makes sure the page doesn't get freed (or shared) underfoot,
  * and should be used by any path that intends to write to the backing page.
  * Returns NULL if the page is not backed by RAM.
- * The caller is responsible for calling put_page() afterwards. */
+ * The caller is responsible for calling put_page() afterwards.
+ */
 struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn,
                                         p2m_type_t *t, p2m_access_t *a,
                                         p2m_query_t q);
@@ -525,7 +538,7 @@ struct two_gfns {
 /* Returns mfn, type and access for potential caller consumption, but any
  * of those can be NULL */
 static inline void get_two_gfns(struct domain *rd, unsigned long rgfn,
-        p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld, 
+        p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld,
         unsigned long lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn,
         p2m_query_t q, struct two_gfns *rval)
 {
@@ -556,9 +569,9 @@ do {                                                    \
 #undef assign_pointers
 
     /* Now do the gets */
-    *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain), 
+    *first_mfn  = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain),
                                      rval->first_gfn, first_t, first_a, q, NULL);
-    *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain), 
+    *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain),
                                      rval->second_gfn, second_t, second_a, q, NULL);
 }
 
@@ -574,9 +587,7 @@ static inline void put_two_gfns(struct two_gfns *arg)
 /* Init the datastructures for later use by the p2m code */
 int p2m_init(struct domain *d);
 
-/* Allocate a new p2m table for a domain. 
- *
- * Returns 0 for success or -errno. */
+/* Allocate a new p2m table for a domain.  Returns 0 for success or -errno. */
 int p2m_alloc_table(struct p2m_domain *p2m);
 
 /* Return all the p2m resources to Xen. */
@@ -610,11 +621,11 @@ void p2m_disable_hardware_log_dirty(struct domain *d);
 void p2m_flush_hardware_cached_dirty(struct domain *d);
 
 /* Change types across all p2m entries in a domain */
-void p2m_change_entry_type_global(struct domain *d, 
+void p2m_change_entry_type_global(struct domain *d,
                                   p2m_type_t ot, p2m_type_t nt);
 
 /* Change types across a range of p2m entries (start ... end-1) */
-void p2m_change_type_range(struct domain *d, 
+void p2m_change_type_range(struct domain *d,
                            unsigned long start, unsigned long end,
                            p2m_type_t ot, p2m_type_t nt);
 
@@ -651,7 +662,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn);
 int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
                     unsigned long gpfn, domid_t foreign_domid);
 
-/* 
+/*
  * Populate-on-demand
  */
 
@@ -732,7 +743,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
 /* Evict a frame */
 int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
 /* Tell xenpaging to drop a paged out frame */
-void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn, 
+void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
                                 p2m_type_t p2mt);
 /* Start populating a paged out frame */
 void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
@@ -741,7 +752,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer);
 /* Resume normal operation (in case a domain was paused) */
 void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp);
 
-/* 
+/*
  * Internal functions, only called by other p2m code
  */
 
@@ -796,7 +807,7 @@ static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
 {
     /* For AMD IOMMUs we need to use type 0 for plain RAM, but we need
      * to make sure that an entirely empty PTE doesn't have RAM type */
-    if ( flags == 0 ) 
+    if ( flags == 0 )
         return p2m_invalid;
     /* AMD IOMMUs use bits 9-11 to encode next io page level and bits
      * 59-62 for iommu flags so we can't use them to store p2m type info. */
@@ -828,7 +839,7 @@ static inline p2m_type_t p2m_recalc_type(bool recalc, p2m_type_t t,
 int p2m_pt_handle_deferred_changes(uint64_t gpa);
 
 /*
- * Nested p2m: shadow p2m tables used for nested HVM virtualization 
+ * Nested p2m: shadow p2m tables used for nested HVM virtualization
  */
 
 /* Flushes specified p2m table */
-- 
2.1.4

