
[Xen-changelog] [xen master] x86/mem_sharing: cleanup code and comments in various locations



commit ca14eba3ccb92a1219a12e8fe671f8e5a96ccf31
Author:     Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
AuthorDate: Wed Dec 18 11:40:41 2019 -0800
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Dec 19 18:05:11 2019 +0000

    x86/mem_sharing: cleanup code and comments in various locations
    
    No functional changes.
    
    Signed-off-by: Tamas K Lengyel <tamas.lengyel@xxxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    [Further cleanup]
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |  13 +-
 xen/arch/x86/mm/mem_sharing.c     | 898 ++++++++++++++++++++------------------
 xen/arch/x86/mm/p2m.c             |  17 +-
 xen/include/asm-x86/mem_sharing.h |  40 +-
 4 files changed, 512 insertions(+), 456 deletions(-)
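
Most of the hunks below are mechanical style conversions: wrapped single-line comments become block comments with the opening /* on its own line, "inline" is dropped from static functions local to a .c file, labels gain a single leading space, and keyword spacing such as "switch ( x )" and "list_for_each ( ... )" is normalised. As an illustrative aside (not part of the patch; a hypothetical stand-alone function, not from the tree), a fragment written in the converted style:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Multi-line comments open with a bare slash-star and close with the
     * star-slash on its own line, as the hunks below do.
     */
    static bool is_power_of_two(unsigned long x)
    {
        if ( x == 0 )
            return false;

        /* A power of two has exactly one bit set. */
        return (x & (x - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", is_power_of_two(64), is_power_of_two(72));
        return 0;
    }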

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 614ed60fe4..3cc6d22dfb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1884,7 +1884,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
      * to the mmio handler.
      */
-    if ( (p2mt == p2m_mmio_dm) || 
+    if ( (p2mt == p2m_mmio_dm) ||
          (npfec.write_access &&
           (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
     {
@@ -1902,12 +1902,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = 
-            (mem_sharing_unshare_page(currd, gfn, 0) < 0);
+        sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
         rc = 1;
         goto out_put_gfn;
     }
- 
+
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
     {
@@ -1953,9 +1952,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
         __put_gfn(p2m, gfn);
     __put_gfn(hostp2m, gfn);
  out:
-    /* All of these are delayed until we exit, since we might 
+    /*
+     * All of these are delayed until we exit, since we might
      * sleep on event ring wait queues, and we must not hold
-     * locks in such circumstance */
+     * locks in such circumstance.
+     */
     if ( paged )
         p2m_mem_paging_populate(currd, gfn);
     if ( sharing_enomem )
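
As an aside on the sharing_enomem hunk above (illustrative only, not part of the patch): dropping the "< 0" comparison does not change behaviour, because the unshare call returns either 0 or -ENOMEM (see the mem_sharing.h comment later in this patch) and the subsequent "if ( sharing_enomem )" treats any non-zero value as failure. A stand-alone sketch of that equivalence:

    #include <assert.h>
    #include <errno.h>

    int main(void)
    {
        int fail = -ENOMEM;   /* what the unshare call returns on failure */
        int ok = 0;           /* and on success */

        /*
         * "if ( sharing_enomem )" fires for -ENOMEM and not for 0, exactly
         * as the old "(... < 0)" comparison did, so behaviour is unchanged.
         */
        assert(fail && !ok);

        return 0;
    }
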
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index efb8821768..ddf1f0f9f9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -59,8 +59,10 @@ static DEFINE_PER_CPU(pg_lock_data_t, __pld);
 #define RMAP_USES_HASHTAB(page) \
         ((page)->sharing->hash_table.flag == NULL)
 #define RMAP_HEAVY_SHARED_PAGE   RMAP_HASHTAB_SIZE
-/* A bit of hysteresis. We don't want to be mutating between list and hash
- * table constantly. */
+/*
+ * A bit of hysteresis. We don't want to be mutating between list and hash
+ * table constantly.
+ */
 #define RMAP_LIGHT_SHARED_PAGE   (RMAP_HEAVY_SHARED_PAGE >> 2)
 
 #if MEM_SHARING_AUDIT
@@ -75,7 +77,7 @@ static void _free_pg_shared_info(struct rcu_head *head)
     xfree(container_of(head, struct page_sharing_info, rcu_head));
 }
 
-static inline void audit_add_list(struct page_info *page)
+static void audit_add_list(struct page_info *page)
 {
     INIT_LIST_HEAD(&page->sharing->entry);
     spin_lock(&shr_audit_lock);
@@ -84,12 +86,12 @@ static inline void audit_add_list(struct page_info *page)
 }
 
 /* Removes from the audit list and cleans up the page sharing metadata. */
-static inline void page_sharing_dispose(struct page_info *page)
+static void page_sharing_dispose(struct page_info *page)
 {
     /* Unlikely given our thresholds, but we should be careful. */
     if ( unlikely(RMAP_USES_HASHTAB(page)) )
-        free_xenheap_pages(page->sharing->hash_table.bucket, 
-                            RMAP_HASHTAB_ORDER);
+        free_xenheap_pages(page->sharing->hash_table.bucket,
+                           RMAP_HASHTAB_ORDER);
 
     spin_lock(&shr_audit_lock);
     list_del_rcu(&page->sharing->entry);
@@ -101,12 +103,12 @@ static inline void page_sharing_dispose(struct page_info *page)
 #else
 
 #define audit_add_list(p)  ((void)0)
-static inline void page_sharing_dispose(struct page_info *page)
+static void page_sharing_dispose(struct page_info *page)
 {
     /* Unlikely given our thresholds, but we should be careful. */
     if ( unlikely(RMAP_USES_HASHTAB(page)) )
-        free_xenheap_pages(page->sharing->hash_table.bucket, 
-                            RMAP_HASHTAB_ORDER);
+        free_xenheap_pages(page->sharing->hash_table.bucket,
+                           RMAP_HASHTAB_ORDER);
     xfree(page->sharing);
 }
 
@@ -122,12 +124,12 @@ static inline void page_sharing_dispose(struct page_info *page)
  * Nesting may happen when sharing (and locking) two pages.
  * Deadlock is avoided by locking pages in increasing order.
  * All memory sharing code paths take the p2m lock of the affected gfn before
- * taking the lock for the underlying page. We enforce ordering between page_lock
- * and p2m_lock using an mm-locks.h construct.
+ * taking the lock for the underlying page. We enforce ordering between
+ * page_lock and p2m_lock using an mm-locks.h construct.
  *
  * TODO: Investigate if PGT_validated is necessary.
  */
-static inline bool _page_lock(struct page_info *page)
+static bool _page_lock(struct page_info *page)
 {
     unsigned long x, nx;
 
@@ -144,7 +146,7 @@ static inline bool _page_lock(struct page_info *page)
     return true;
 }
 
-static inline void _page_unlock(struct page_info *page)
+static void _page_unlock(struct page_info *page)
 {
     unsigned long x, nx, y = page->u.inuse.type_info;
 
@@ -158,7 +160,7 @@ static inline void _page_unlock(struct page_info *page)
     } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
 }
 
-static inline bool mem_sharing_page_lock(struct page_info *pg)
+static bool mem_sharing_page_lock(struct page_info *pg)
 {
     bool rc;
     pg_lock_data_t *pld = &(this_cpu(__pld));
@@ -168,25 +170,25 @@ static inline bool mem_sharing_page_lock(struct page_info *pg)
     if ( rc )
     {
         preempt_disable();
-        page_sharing_mm_post_lock(&pld->mm_unlock_level, 
+        page_sharing_mm_post_lock(&pld->mm_unlock_level,
                                   &pld->recurse_count);
     }
     return rc;
 }
 
-static inline void mem_sharing_page_unlock(struct page_info *pg)
+static void mem_sharing_page_unlock(struct page_info *pg)
 {
     pg_lock_data_t *pld = &(this_cpu(__pld));
 
-    page_sharing_mm_unlock(pld->mm_unlock_level, 
+    page_sharing_mm_unlock(pld->mm_unlock_level,
                            &pld->recurse_count);
     preempt_enable();
     _page_unlock(pg);
 }
 
-static inline shr_handle_t get_next_handle(void)
+static shr_handle_t get_next_handle(void)
 {
-    /* Get the next handle get_page style */ 
+    /* Get the next handle get_page style */
     uint64_t x, y = next_handle;
     do {
         x = y;
@@ -198,24 +200,26 @@ static inline shr_handle_t get_next_handle(void)
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
 
-static atomic_t nr_saved_mfns   = ATOMIC_INIT(0); 
+static atomic_t nr_saved_mfns   = ATOMIC_INIT(0);
 static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
 
-/** Reverse map **/
-/* Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
+/*
+ * Reverse map
+ *
+ * Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
  * this shared frame backs. For pages with a low degree of sharing, a O(n)
  * search linked list is good enough. For pages with higher degree of sharing,
- * we use a hash table instead. */
+ * we use a hash table instead.
+ */
 
 typedef struct gfn_info
 {
     unsigned long gfn;
-    domid_t domain; 
+    domid_t domain;
     struct list_head list;
 } gfn_info_t;
 
-static inline void
-rmap_init(struct page_info *page)
+static void rmap_init(struct page_info *page)
 {
     /* We always start off as a doubly linked list. */
     INIT_LIST_HEAD(&page->sharing->gfns);
@@ -225,10 +229,11 @@ rmap_init(struct page_info *page)
 #define HASH(domain, gfn)       \
     (((gfn) + (domain)) % RMAP_HASHTAB_SIZE)
 
-/* Conversions. Tuned by the thresholds. Should only happen twice 
- * (once each) during the lifetime of a shared page */
-static inline int
-rmap_list_to_hash_table(struct page_info *page)
+/*
+ * Conversions. Tuned by the thresholds. Should only happen twice
+ * (once each) during the lifetime of a shared page.
+ */
+static inline int rmap_list_to_hash_table(struct page_info *page)
 {
     unsigned int i;
     struct list_head *pos, *tmp, *b =
@@ -240,10 +245,11 @@ rmap_list_to_hash_table(struct page_info *page)
     for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ )
         INIT_LIST_HEAD(b + i);
 
-    list_for_each_safe(pos, tmp, &page->sharing->gfns)
+    list_for_each_safe ( pos, tmp, &page->sharing->gfns )
     {
         gfn_info_t *gfn_info = list_entry(pos, gfn_info_t, list);
         struct list_head *bucket = b + HASH(gfn_info->domain, gfn_info->gfn);
+
         list_del(pos);
         list_add(pos, bucket);
     }
@@ -254,8 +260,7 @@ rmap_list_to_hash_table(struct page_info *page)
     return 0;
 }
 
-static inline void
-rmap_hash_table_to_list(struct page_info *page)
+static void rmap_hash_table_to_list(struct page_info *page)
 {
     unsigned int i;
     struct list_head *bucket = page->sharing->hash_table.bucket;
@@ -265,7 +270,8 @@ rmap_hash_table_to_list(struct page_info *page)
     for ( i = 0; i < RMAP_HASHTAB_SIZE; i++ )
     {
         struct list_head *pos, *tmp, *head = bucket + i;
-        list_for_each_safe(pos, tmp, head)
+
+        list_for_each_safe ( pos, tmp, head )
         {
             list_del(pos);
             list_add(pos, &page->sharing->gfns);
@@ -276,22 +282,23 @@ rmap_hash_table_to_list(struct page_info *page)
 }
 
 /* Generic accessors to the rmap */
-static inline unsigned long
-rmap_count(struct page_info *pg)
+static unsigned long rmap_count(const struct page_info *pg)
 {
     unsigned long count;
     unsigned long t = read_atomic(&pg->u.inuse.type_info);
+
     count = t & PGT_count_mask;
     if ( t & PGT_locked )
         count--;
     return count;
 }
 
-/* The page type count is always decreased after removing from the rmap.
- * Use a convert flag to avoid mutating the rmap if in the middle of an 
- * iterator, or if the page will be soon destroyed anyways. */
-static inline void
-rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
+/*
+ * The page type count is always decreased after removing from the rmap.
+ * Use a convert flag to avoid mutating the rmap if in the middle of an
+ * iterator, or if the page will be soon destroyed anyways.
+ */
+static void rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
 {
     if ( RMAP_USES_HASHTAB(page) && convert &&
          (rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) )
@@ -302,38 +309,38 @@ rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
 }
 
 /* The page type count is always increased before adding to the rmap. */
-static inline void
-rmap_add(gfn_info_t *gfn_info, struct page_info *page)
+static void rmap_add(gfn_info_t *gfn_info, struct page_info *page)
 {
     struct list_head *head;
 
     if ( !RMAP_USES_HASHTAB(page) &&
          (rmap_count(page) >= RMAP_HEAVY_SHARED_PAGE) )
-        /* The conversion may fail with ENOMEM. We'll be less efficient,
-         * but no reason to panic. */
+        /*
+         * The conversion may fail with ENOMEM. We'll be less efficient,
+         * but no reason to panic.
+         */
         (void)rmap_list_to_hash_table(page);
 
-    head = (RMAP_USES_HASHTAB(page)) ?
-        page->sharing->hash_table.bucket + 
-                            HASH(gfn_info->domain, gfn_info->gfn) :
-        &page->sharing->gfns;
+    head = (RMAP_USES_HASHTAB(page)
+            ? page->sharing->hash_table.bucket + HASH(gfn_info->domain,
+                                                      gfn_info->gfn)
+            : &page->sharing->gfns);
 
     INIT_LIST_HEAD(&gfn_info->list);
     list_add(&gfn_info->list, head);
 }
 
-static inline gfn_info_t *
-rmap_retrieve(uint16_t domain_id, unsigned long gfn, 
-                            struct page_info *page)
+static gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn,
+                                 struct page_info *page)
 {
     gfn_info_t *gfn_info;
     struct list_head *le, *head;
 
-    head = (RMAP_USES_HASHTAB(page)) ?
-        page->sharing->hash_table.bucket + HASH(domain_id, gfn) :
-        &page->sharing->gfns;
+    head = (RMAP_USES_HASHTAB(page)
+            ? page->sharing->hash_table.bucket + HASH(domain_id, gfn)
+            : &page->sharing->gfns);
 
-    list_for_each(le, head)
+    list_for_each ( le, head )
     {
         gfn_info = list_entry(le, gfn_info_t, list);
         if ( (gfn_info->gfn == gfn) && (gfn_info->domain == domain_id) )
@@ -345,43 +352,44 @@ rmap_retrieve(uint16_t domain_id, unsigned long gfn,
 }
 
 /* Returns true if the rmap has only one entry. O(1) complexity. */
-static inline int rmap_has_one_entry(struct page_info *page)
+static bool rmap_has_one_entry(const struct page_info *page)
 {
-    return (rmap_count(page) == 1);
+    return rmap_count(page) == 1;
 }
 
 /* Returns true if the rmap has any entries. O(1) complexity. */
-static inline int rmap_has_entries(struct page_info *page)
+static bool rmap_has_entries(const struct page_info *page)
 {
-    return (rmap_count(page) != 0);
+    return rmap_count(page) != 0;
 }
 
-/* The iterator hides the details of how the rmap is implemented. This
- * involves splitting the list_for_each_safe macro into two steps. */
+/*
+ * The iterator hides the details of how the rmap is implemented. This
+ * involves splitting the list_for_each_safe macro into two steps.
+ */
 struct rmap_iterator {
     struct list_head *curr;
     struct list_head *next;
     unsigned int bucket;
 };
 
-static inline void
-rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
+static void rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
 {
-    ri->curr = (RMAP_USES_HASHTAB(page)) ?
-                page->sharing->hash_table.bucket :
-                &page->sharing->gfns;
-    ri->next = ri->curr->next; 
+    ri->curr = (RMAP_USES_HASHTAB(page)
+                ? page->sharing->hash_table.bucket
+                : &page->sharing->gfns);
+    ri->next = ri->curr->next;
     ri->bucket = 0;
 }
 
-static inline gfn_info_t *
-rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
+static gfn_info_t *rmap_iterate(struct page_info *page,
+                                struct rmap_iterator *ri)
 {
-    struct list_head *head = (RMAP_USES_HASHTAB(page)) ?
-                page->sharing->hash_table.bucket + ri->bucket :
-                &page->sharing->gfns;
+    struct list_head *head = (RMAP_USES_HASHTAB(page)
+                              ? page->sharing->hash_table.bucket + ri->bucket
+                              : &page->sharing->gfns);
 
-retry:
+ retry:
     if ( ri->next == head)
     {
         if ( RMAP_USES_HASHTAB(page) )
@@ -394,7 +402,8 @@ retry:
             ri->curr = head;
             ri->next = ri->curr->next;
             goto retry;
-        } else
+        }
+        else
             /* List exhausted */
             return NULL;
     }
@@ -405,14 +414,13 @@ retry:
     return list_entry(ri->curr, gfn_info_t, list);
 }
 
-static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
-                                                struct domain *d,
-                                                unsigned long gfn)
+static gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
+                                         struct domain *d, unsigned long gfn)
 {
     gfn_info_t *gfn_info = xmalloc(gfn_info_t);
 
     if ( gfn_info == NULL )
-        return NULL; 
+        return NULL;
 
     gfn_info->gfn = gfn;
     gfn_info->domain = d->domain_id;
@@ -425,9 +433,8 @@ static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
     return gfn_info;
 }
 
-static inline void mem_sharing_gfn_destroy(struct page_info *page,
-                                           struct domain *d,
-                                           gfn_info_t *gfn_info)
+static void mem_sharing_gfn_destroy(struct page_info *page, struct domain *d,
+                                    gfn_info_t *gfn_info)
 {
     /* Decrement the number of pages. */
     atomic_dec(&d->shr_pages);
@@ -437,25 +444,29 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page,
     xfree(gfn_info);
 }
 
-static struct page_info* mem_sharing_lookup(unsigned long mfn)
+static struct page_info *mem_sharing_lookup(unsigned long mfn)
 {
-    if ( mfn_valid(_mfn(mfn)) )
-    {
-        struct page_info* page = mfn_to_page(_mfn(mfn));
-        if ( page_get_owner(page) == dom_cow )
-        {
-            /* Count has to be at least two, because we're called
-             * with the mfn locked (1) and this is supposed to be 
-             * a shared page (1). */
-            unsigned long t = read_atomic(&page->u.inuse.type_info);
-            ASSERT((t & PGT_type_mask) == PGT_shared_page);
-            ASSERT((t & PGT_count_mask) >= 2);
-            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
-            return page;
-        }
-    }
+    struct page_info *page;
+    unsigned long t;
 
-    return NULL;
+    if ( !mfn_valid(_mfn(mfn)) )
+        return NULL;
+
+    page = mfn_to_page(_mfn(mfn));
+    if ( page_get_owner(page) != dom_cow )
+        return NULL;
+
+    /*
+     * Count has to be at least two, because we're called
+     * with the mfn locked (1) and this is supposed to be
+     * a shared page (1).
+     */
+    t = read_atomic(&page->u.inuse.type_info);
+    ASSERT((t & PGT_type_mask) == PGT_shared_page);
+    ASSERT((t & PGT_count_mask) >= 2);
+    ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
+
+    return page;
 }
 
 static int audit(void)
@@ -470,7 +481,7 @@ static int audit(void)
 
     rcu_read_lock(&shr_audit_read_lock);
 
-    list_for_each_rcu(ae, &shr_audit_list)
+    list_for_each_rcu ( ae, &shr_audit_list )
     {
         struct page_sharing_info *pg_shared_info;
         unsigned long nr_gfns = 0;
@@ -486,44 +497,46 @@ static int audit(void)
         /* If we can't lock it, it's definitely not a shared page */
         if ( !mem_sharing_page_lock(pg) )
         {
-           MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked 
(%lx)!\n",
-                              mfn_x(mfn), pg->u.inuse.type_info);
-           errors++;
-           continue;
+            MEM_SHARING_DEBUG(
+                "mfn %lx in audit list, but cannot be locked (%lx)!\n",
+                mfn_x(mfn), pg->u.inuse.type_info);
+            errors++;
+            continue;
         }
 
-        /* Check if the MFN has correct type, owner and handle. */ 
+        /* Check if the MFN has correct type, owner and handle. */
         if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page )
         {
-           MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page 
(%lx)!\n",
-                              mfn_x(mfn), pg->u.inuse.type_info & 
PGT_type_mask);
-           errors++;
-           continue;
+            MEM_SHARING_DEBUG(
+                "mfn %lx in audit list, but not PGT_shared_page (%lx)!\n",
+                mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask);
+            errors++;
+            continue;
         }
 
         /* Check the page owner. */
         if ( page_get_owner(pg) != dom_cow )
         {
-           MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner (%hu)!\n",
-                             mfn_x(mfn), page_get_owner(pg)->domain_id);
-           errors++;
+            MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner %pd!\n",
+                              mfn_x(mfn), page_get_owner(pg));
+            errors++;
         }
 
         /* Check the m2p entry */
         if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) )
         {
-           MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
-                             mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
-           errors++;
+            MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n",
+                              mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn)));
+            errors++;
         }
 
         /* Check we have a list */
         if ( (!pg->sharing) || !rmap_has_entries(pg) )
         {
-           MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n",
-                             mfn_x(mfn));
-           errors++;
-           continue;
+            MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n",
+                              mfn_x(mfn));
+            errors++;
+            continue;
         }
 
         /* We've found a page that is shared */
@@ -545,7 +558,7 @@ static int audit(void)
                 errors++;
                 continue;
             }
-            o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); 
+            o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
             if ( !mfn_eq(o_mfn, mfn) )
             {
                 MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
@@ -568,7 +581,7 @@ static int audit(void)
         {
             MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx."
                               "nr_gfns in list %lu, in type_info %lx\n",
-                              mfn_x(mfn), nr_gfns, 
+                              mfn_x(mfn), nr_gfns,
                               (pg->u.inuse.type_info & PGT_count_mask));
             errors++;
         }
@@ -600,11 +613,11 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         .reason = VM_EVENT_REASON_MEM_SHARING,
         .vcpu_id = v->vcpu_id,
         .u.mem_sharing.gfn = gfn,
-        .u.mem_sharing.p2mt = p2m_ram_shared
+        .u.mem_sharing.p2mt = p2m_ram_shared,
     };
 
-    if ( (rc = __vm_event_claim_slot(d, 
-                        d->vm_event_share, allow_sleep)) < 0 )
+    if ( (rc = __vm_event_claim_slot(
+              d, d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
     if ( v->domain == d )
@@ -620,18 +633,18 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
 
 unsigned int mem_sharing_get_nr_saved_mfns(void)
 {
-    return ((unsigned int)atomic_read(&nr_saved_mfns));
+    return atomic_read(&nr_saved_mfns);
 }
 
 unsigned int mem_sharing_get_nr_shared_mfns(void)
 {
-    return (unsigned int)atomic_read(&nr_shared_mfns);
+    return atomic_read(&nr_shared_mfns);
 }
 
 /* Functions that change a page's type and ownership */
-static int page_make_sharable(struct domain *d, 
-                       struct page_info *page, 
-                       int expected_refcnt)
+static int page_make_sharable(struct domain *d,
+                              struct page_info *page,
+                              int expected_refcnt)
 {
     bool_t drop_dom_ref;
 
@@ -658,8 +671,10 @@ static int page_make_sharable(struct domain *d,
         return -EEXIST;
     }
 
-    /* Check if the ref count is 2. The first from PGC_allocated, and
-     * the second from get_page_and_type at the top of this function */
+    /*
+     * Check if the ref count is 2. The first from PGC_allocated, and
+     * the second from get_page_and_type at the top of this function.
+     */
     if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
     {
         spin_unlock(&d->page_alloc_lock);
@@ -684,7 +699,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
 
     if ( !get_page(page, dom_cow) )
         return -EINVAL;
-    
+
     spin_lock(&d->page_alloc_lock);
 
     if ( d->is_dying )
@@ -721,16 +736,19 @@ static int page_make_private(struct domain *d, struct page_info *page)
     return 0;
 }
 
-static inline struct page_info *__grab_shared_page(mfn_t mfn)
+static struct page_info *__grab_shared_page(mfn_t mfn)
 {
     struct page_info *pg = NULL;
 
     if ( !mfn_valid(mfn) )
         return NULL;
+
     pg = mfn_to_page(mfn);
 
-    /* If the page is not validated we can't lock it, and if it's  
-     * not validated it's obviously not shared. */
+    /*
+     * If the page is not validated we can't lock it, and if it's
+     * not validated it's obviously not shared.
+     */
     if ( !mem_sharing_page_lock(pg) )
         return NULL;
 
@@ -754,12 +772,10 @@ static int debug_mfn(mfn_t mfn)
         return -EINVAL;
     }
 
-    MEM_SHARING_DEBUG( 
-            "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n",
-            mfn_x(page_to_mfn(page)), 
-            page->count_info, 
-            page->u.inuse.type_info,
-            page_get_owner(page)->domain_id);
+    MEM_SHARING_DEBUG(
+        "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner=%pd\n",
+        mfn_x(page_to_mfn(page)), page->count_info,
+        page->u.inuse.type_info, page_get_owner(page));
 
     /* -1 because the page is locked and that's an additional type ref */
     num_refs = ((int) (page->u.inuse.type_info & PGT_count_mask)) - 1;
@@ -775,7 +791,7 @@ static int debug_gfn(struct domain *d, gfn_t gfn)
 
     mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);
 
-    MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", 
+    MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
                       d->domain_id, gfn_x(gfn));
     num_refs = debug_mfn(mfn);
     put_gfn(d, gfn_x(gfn));
@@ -796,10 +812,9 @@ static int debug_gref(struct domain *d, grant_ref_t ref)
                           d->domain_id, ref, rc);
         return rc;
     }
-    
-    MEM_SHARING_DEBUG(
-            "==> Grant [dom=%d,ref=%d], status=%x. ", 
-            d->domain_id, ref, status);
+
+    MEM_SHARING_DEBUG("==> Grant [dom=%d,ref=%d], status=%x. ",
+                      d->domain_id, ref, status);
 
     return debug_gfn(d, gfn);
 }
@@ -824,15 +839,12 @@ static int nominate_page(struct domain *d, gfn_t gfn,
         goto out;
 
     /* Return the handle if the page is already shared */
-    if ( p2m_is_shared(p2mt) ) {
+    if ( p2m_is_shared(p2mt) )
+    {
         struct page_info *pg = __grab_shared_page(mfn);
         if ( !pg )
-        {
-            gprintk(XENLOG_ERR,
-                    "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn 
%" PRI_mfn " dom%d\n",
-                    gfn_x(gfn), mfn_x(mfn), d->domain_id);
             BUG();
-        }
+
         *phandle = pg->sharing->handle;
         ret = 0;
         mem_sharing_page_unlock(pg);
@@ -843,7 +855,6 @@ static int nominate_page(struct domain *d, gfn_t gfn,
     if ( !p2m_is_sharable(p2mt) )
         goto out;
 
-#ifdef CONFIG_HVM
     /* Check if there are mem_access/remapped altp2m entries for this page */
     if ( altp2m_active(d) )
     {
@@ -872,42 +883,42 @@ static int nominate_page(struct domain *d, gfn_t gfn,
 
         altp2m_list_unlock(d);
     }
-#endif
 
     /* Try to convert the mfn to the sharable type */
     page = mfn_to_page(mfn);
-    ret = page_make_sharable(d, page, expected_refcnt); 
-    if ( ret ) 
+    ret = page_make_sharable(d, page, expected_refcnt);
+    if ( ret )
         goto out;
 
-    /* Now that the page is validated, we can lock it. There is no 
-     * race because we're holding the p2m entry, so no one else 
-     * could be nominating this gfn */
+    /*
+     * Now that the page is validated, we can lock it. There is no
+     * race because we're holding the p2m entry, so no one else
+     * could be nominating this gfn.
+     */
     ret = -ENOENT;
     if ( !mem_sharing_page_lock(page) )
         goto out;
 
     /* Initialize the shared state */
     ret = -ENOMEM;
-    if ( (page->sharing = 
-            xmalloc(struct page_sharing_info)) == NULL )
+    if ( !(page->sharing = xmalloc(struct page_sharing_info)) )
     {
         /* Making a page private atomically unlocks it */
-        BUG_ON(page_make_private(d, page) != 0);
+        BUG_ON(page_make_private(d, page));
         goto out;
     }
     page->sharing->pg = page;
     rmap_init(page);
 
     /* Create the handle */
-    page->sharing->handle = get_next_handle();  
+    page->sharing->handle = get_next_handle();
 
     /* Create the local gfn info */
-    if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL )
+    if ( !mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) )
     {
         xfree(page->sharing);
         page->sharing = NULL;
-        BUG_ON(page_make_private(d, page) != 0);
+        BUG_ON(page_make_private(d, page));
         goto out;
     }
 
@@ -946,19 +957,24 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
 
-    /* This tricky business is to avoid two callers deadlocking if 
-     * grabbing pages in opposite client/source order */
+    /*
+     * This tricky business is to avoid two callers deadlocking if
+     * grabbing pages in opposite client/source order.
+     */
     if ( mfn_eq(smfn, cmfn) )
     {
-        /* The pages are already the same.  We could return some
+        /*
+         * The pages are already the same.  We could return some
          * kind of error here, but no matter how you look at it,
          * the pages are already 'shared'.  It possibly represents
          * a big problem somewhere else, but as far as sharing is
-         * concerned: great success! */
+         * concerned: great success!
+         */
         ret = 0;
         goto err_out;
     }
-    else if ( mfn_x(smfn) < mfn_x(cmfn) )
+
+    if ( mfn_x(smfn) < mfn_x(cmfn) )
     {
         ret = XENMEM_SHARING_OP_S_HANDLE_INVALID;
         spage = firstpg = __grab_shared_page(smfn);
@@ -972,7 +988,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
             mem_sharing_page_unlock(spage);
             goto err_out;
         }
-    } else {
+    }
+    else
+    {
         ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
         cpage = firstpg = __grab_shared_page(cmfn);
         if ( cpage == NULL )
@@ -998,6 +1016,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
         mem_sharing_page_unlock(firstpg);
         goto err_out;
     }
+
     if ( cpage->sharing->handle != ch )
     {
         ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
@@ -1010,11 +1029,15 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
     {
-        /* Get the source page and type, this should never fail: 
-         * we are under shr lock, and got a successful lookup */
+        /*
+         * Get the source page and type, this should never fail:
+         * we are under shr lock, and got a successful lookup.
+         */
         BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
-        /* Move the gfn_info from client list to source list.
-         * Don't change the type of rmap for the client page. */
+        /*
+         * Move the gfn_info from client list to source list.
+         * Don't change the type of rmap for the client page.
+         */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
         put_count++;
@@ -1043,14 +1066,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     atomic_dec(&nr_shared_mfns);
     atomic_inc(&nr_saved_mfns);
     ret = 0;
-    
+
 err_out:
     put_two_gfns(&tg);
     return ret;
 }
 
 int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
-                            struct domain *cd, unsigned long cgfn) 
+                               struct domain *cd, unsigned long cgfn)
 {
     struct page_info *spage;
     int ret = -EINVAL;
@@ -1069,15 +1092,18 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
     spage = __grab_shared_page(smfn);
     if ( spage == NULL )
         goto err_out;
+
     ASSERT(smfn_type == p2m_ram_shared);
 
     /* Check that the handles match */
     if ( spage->sharing->handle != sh )
         goto err_unlock;
 
-    /* Make sure the target page is a hole in the physmap. These are typically
+    /*
+     * Make sure the target page is a hole in the physmap. These are typically
      * p2m_mmio_dm, but also accept p2m_invalid and paged out pages. See the
-     * definition of p2m_is_hole in p2m.h. */
+     * definition of p2m_is_hole in p2m.h.
+     */
     if ( !p2m_is_hole(cmfn_type) )
     {
         ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
@@ -1086,7 +1112,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
 
     /* This is simpler than regular sharing */
     BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
-    if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL )
+    if ( !(gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) )
     {
         put_page_and_type(spage);
         ret = -ENOMEM;
@@ -1101,12 +1127,20 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
     {
         mem_sharing_gfn_destroy(spage, cd, gfn_info);
         put_page_and_type(spage);
-    } else {
-        /* There is a chance we're plugging a hole where a paged out page was */
+    }
+    else
+    {
+        /*
+         * There is a chance we're plugging a hole where a paged out
+         * page was.
+         */
         if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) )
         {
             atomic_dec(&cd->paged_pages);
-            /* Further, there is a chance this was a valid page. Don't leak it. */
+            /*
+             * Further, there is a chance this was a valid page.
+             * Don't leak it.
+             */
             if ( mfn_valid(cmfn) )
             {
                 struct page_info *cpage = mfn_to_page(cmfn);
@@ -1133,13 +1167,14 @@ err_out:
 }
 
 
-/* A note on the rationale for unshare error handling:
+/*
+ * A note on the rationale for unshare error handling:
  *  1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s
  *  2. We notify a potential dom0 helper through a vm_event ring. But we
- *     allow the notification to not go to sleep. If the event ring is full 
+ *     allow the notification to not go to sleep. If the event ring is full
  *     of ENOMEM warnings, then it's on the ball.
  *  3. We cannot go to sleep until the unshare is resolved, because we might
- *     be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) 
+ *     be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
  *  4. So, we make sure we:
  *     4.1. return an error
  *     4.2. do not corrupt shared memory
@@ -1147,19 +1182,20 @@ err_out:
  *     4.4. let the guest deal with it if the error propagation will reach it
  */
 int __mem_sharing_unshare_page(struct domain *d,
-                             unsigned long gfn, 
-                             uint16_t flags)
+                               unsigned long gfn,
+                               uint16_t flags)
 {
     p2m_type_t p2mt;
     mfn_t mfn;
     struct page_info *page, *old_page;
     int last_gfn;
     gfn_info_t *gfn_info = NULL;
-   
+
     mfn = get_gfn(d, gfn, &p2mt);
-    
+
     /* Has someone already unshared it? */
-    if ( !p2m_is_shared(p2mt) ) {
+    if ( !p2m_is_shared(p2mt) )
+    {
         put_gfn(d, gfn);
         return 0;
     }
@@ -1167,26 +1203,30 @@ int __mem_sharing_unshare_page(struct domain *d,
     page = __grab_shared_page(mfn);
     if ( page == NULL )
     {
-        gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: "
-                                "%lx\n", gfn);
+        gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: %lx\n",
+                 gfn);
         BUG();
     }
 
     gfn_info = rmap_retrieve(d->domain_id, gfn, page);
     if ( unlikely(gfn_info == NULL) )
     {
-        gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: "
-                                "%lx\n", gfn);
+        gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: %lx\n",
+                 gfn);
         BUG();
     }
 
-    /* Do the accounting first. If anything fails below, we have bigger
-     * bigger fish to fry. First, remove the gfn from the list. */ 
+    /*
+     * Do the accounting first. If anything fails below, we have bigger
+     * bigger fish to fry. First, remove the gfn from the list.
+     */
     last_gfn = rmap_has_one_entry(page);
     if ( last_gfn )
     {
-        /* Clean up shared state. Get rid of the <domid, gfn> tuple
-         * before destroying the rmap. */
+        /*
+         * Clean up shared state. Get rid of the <domid, gfn> tuple
+         * before destroying the rmap.
+         */
         mem_sharing_gfn_destroy(page, d, gfn_info);
         page_sharing_dispose(page);
         page->sharing = NULL;
@@ -1195,8 +1235,10 @@ int __mem_sharing_unshare_page(struct domain *d,
     else
         atomic_dec(&nr_saved_mfns);
 
-    /* If the GFN is getting destroyed drop the references to MFN 
-     * (possibly freeing the page), and exit early */
+    /*
+     * If the GFN is getting destroyed drop the references to MFN
+     * (possibly freeing the page), and exit early.
+     */
     if ( flags & MEM_SHARING_DESTROY_GFN )
     {
         if ( !last_gfn )
@@ -1212,7 +1254,7 @@ int __mem_sharing_unshare_page(struct domain *d,
 
         return 0;
     }
- 
+
     if ( last_gfn )
     {
         /* Making a page private atomically unlocks it */
@@ -1222,14 +1264,16 @@ int __mem_sharing_unshare_page(struct domain *d,
 
     old_page = page;
     page = alloc_domheap_page(d, 0);
-    if ( !page ) 
+    if ( !page )
     {
         /* Undo dec of nr_saved_mfns, as the retry will decrease again. */
         atomic_inc(&nr_saved_mfns);
         mem_sharing_page_unlock(old_page);
         put_gfn(d, gfn);
-        /* Caller is responsible for placing an event
-         * in the ring */
+        /*
+         * Caller is responsible for placing an event
+         * in the ring.
+         */
         return -ENOMEM;
     }
 
@@ -1240,19 +1284,21 @@ int __mem_sharing_unshare_page(struct domain *d,
     mem_sharing_page_unlock(old_page);
     put_page_and_type(old_page);
 
-private_page_found:    
+ private_page_found:
     if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
     {
-        gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", 
-                                d->domain_id, gfn);
+        gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
+                 d->domain_id, gfn);
         BUG();
     }
 
     /* Update m2p entry */
     set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
 
-    /* Now that the gfn<->mfn map is properly established,
-     * marking dirty is feasible */
+    /*
+     * Now that the gfn<->mfn map is properly established,
+     * marking dirty is feasible
+     */
     paging_mark_dirty(d, page_to_mfn(page));
     /* We do not need to unlock a private page */
     put_gfn(d, gfn);
@@ -1277,20 +1323,23 @@ int relinquish_shared_pages(struct domain *d)
         mfn_t mfn;
         int set_rc;
 
-        if ( atomic_read(&d->shr_pages) == 0 )
+        if ( !atomic_read(&d->shr_pages) )
             break;
+
         mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
-        if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
+        if ( mfn_valid(mfn) && t == p2m_ram_shared )
         {
             /* Does not fail with ENOMEM given the DESTROY flag */
-            BUG_ON(__mem_sharing_unshare_page(d, gfn, 
-                    MEM_SHARING_DESTROY_GFN));
-            /* Clear out the p2m entry so no one else may try to
+            BUG_ON(__mem_sharing_unshare_page(
+                       d, gfn, MEM_SHARING_DESTROY_GFN));
+            /*
+             * Clear out the p2m entry so no one else may try to
              * unshare.  Must succeed: we just read the old entry and
-             * we hold the p2m lock. */
+             * we hold the p2m lock.
+             */
             set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
                                     p2m_invalid, p2m_access_rwx, -1);
-            ASSERT(set_rc == 0);
+            ASSERT(!set_rc);
             count += 0x10;
         }
         else
@@ -1392,255 +1441,255 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
     switch ( mso.op )
     {
-        case XENMEM_sharing_op_nominate_gfn:
-        {
-            shr_handle_t handle;
+    case XENMEM_sharing_op_nominate_gfn:
+    {
+        shr_handle_t handle;
 
-            rc = -EINVAL;
-            if ( !mem_sharing_enabled(d) )
-                goto out;
+        rc = -EINVAL;
+        if ( !mem_sharing_enabled(d) )
+            goto out;
 
-            rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle);
-            mso.u.nominate.handle = handle;
-        }
-        break;
+        rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle);
+        mso.u.nominate.handle = handle;
+    }
+    break;
 
-        case XENMEM_sharing_op_nominate_gref:
-        {
-            grant_ref_t gref = mso.u.nominate.u.grant_ref;
-            gfn_t gfn;
-            shr_handle_t handle;
+    case XENMEM_sharing_op_nominate_gref:
+    {
+        grant_ref_t gref = mso.u.nominate.u.grant_ref;
+        gfn_t gfn;
+        shr_handle_t handle;
+
+        rc = -EINVAL;
+        if ( !mem_sharing_enabled(d) )
+            goto out;
+        rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL);
+        if ( rc < 0 )
+            goto out;
+
+        rc = nominate_page(d, gfn, 3, &handle);
+        mso.u.nominate.handle = handle;
+    }
+    break;
 
-            rc = -EINVAL;
-            if ( !mem_sharing_enabled(d) )
-                goto out;
-            rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL);
-            if ( rc < 0 )
-                goto out;
+    case XENMEM_sharing_op_share:
+    {
+        gfn_t sgfn, cgfn;
+        struct domain *cd;
+        shr_handle_t sh, ch;
 
-            rc = nominate_page(d, gfn, 3, &handle);
-            mso.u.nominate.handle = handle;
-        }
-        break;
+        rc = -EINVAL;
+        if ( !mem_sharing_enabled(d) )
+            goto out;
 
-        case XENMEM_sharing_op_share:
+        rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
+                                               &cd);
+        if ( rc )
+            goto out;
+
+        rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
+        if ( rc )
         {
-            gfn_t sgfn, cgfn;
-            struct domain *cd;
-            shr_handle_t sh, ch;
+            rcu_unlock_domain(cd);
+            goto out;
+        }
 
+        if ( !mem_sharing_enabled(cd) )
+        {
+            rcu_unlock_domain(cd);
             rc = -EINVAL;
-            if ( !mem_sharing_enabled(d) )
-                goto out;
+            goto out;
+        }
 
-            rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
-                                                   &cd);
-            if ( rc )
-                goto out;
+        if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
+        {
+            grant_ref_t gref =
+                XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.source_gfn);
 
-            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
-            if ( rc )
+            rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
+                                         NULL);
+            if ( rc < 0 )
             {
                 rcu_unlock_domain(cd);
                 goto out;
             }
+        }
+        else
+            sgfn = _gfn(mso.u.share.source_gfn);
 
-            if ( !mem_sharing_enabled(cd) )
+        if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
+        {
+            grant_ref_t gref =
+                XENMEM_SHARING_OP_FIELD_GET_GREF(mso.u.share.client_gfn);
+
+            rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
+                                         NULL);
+            if ( rc < 0 )
             {
                 rcu_unlock_domain(cd);
-                rc = -EINVAL;
                 goto out;
             }
+        }
+        else
+            cgfn = _gfn(mso.u.share.client_gfn);
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
-            {
-                grant_ref_t gref = (grant_ref_t) 
-                                    (XENMEM_SHARING_OP_FIELD_GET_GREF(
-                                        mso.u.share.source_gfn));
-                rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
-                                             NULL);
-                if ( rc < 0 )
-                {
-                    rcu_unlock_domain(cd);
-                    goto out;
-                }
-            }
-            else
-                sgfn = _gfn(mso.u.share.source_gfn);
+        sh = mso.u.share.source_handle;
+        ch = mso.u.share.client_handle;
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
-            {
-                grant_ref_t gref = (grant_ref_t) 
-                                    (XENMEM_SHARING_OP_FIELD_GET_GREF(
-                                        mso.u.share.client_gfn));
-                rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
-                                             NULL);
-                if ( rc < 0 )
-                {
-                    rcu_unlock_domain(cd);
-                    goto out;
-                }
-            }
-            else
-                cgfn = _gfn(mso.u.share.client_gfn);
+        rc = share_pages(d, sgfn, sh, cd, cgfn, ch);
 
-            sh = mso.u.share.source_handle;
-            ch = mso.u.share.client_handle;
+        rcu_unlock_domain(cd);
+    }
+    break;
 
-            rc = share_pages(d, sgfn, sh, cd, cgfn, ch);
+    case XENMEM_sharing_op_add_physmap:
+    {
+        unsigned long sgfn, cgfn;
+        struct domain *cd;
+        shr_handle_t sh;
+
+        rc = -EINVAL;
+        if ( !mem_sharing_enabled(d) )
+            goto out;
 
+        rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
+                                               &cd);
+        if ( rc )
+            goto out;
+
+        rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
+        if ( rc )
+        {
             rcu_unlock_domain(cd);
+            goto out;
         }
-        break;
 
-        case XENMEM_sharing_op_add_physmap:
+        if ( !mem_sharing_enabled(cd) )
         {
-            unsigned long sgfn, cgfn;
-            struct domain *cd;
-            shr_handle_t sh;
+            rcu_unlock_domain(cd);
+            rc = -EINVAL;
+            goto out;
+        }
 
+        if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
+        {
+            /* Cannot add a gref to the physmap */
+            rcu_unlock_domain(cd);
             rc = -EINVAL;
-            if ( !mem_sharing_enabled(d) )
-                goto out;
+            goto out;
+        }
 
-            rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain,
-                                                   &cd);
-            if ( rc )
-                goto out;
+        sgfn    = mso.u.share.source_gfn;
+        sh      = mso.u.share.source_handle;
+        cgfn    = mso.u.share.client_gfn;
 
-            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op);
-            if ( rc )
-            {
-                rcu_unlock_domain(cd);
-                goto out;
-            }
+        rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
 
-            if ( !mem_sharing_enabled(cd) )
-            {
-                rcu_unlock_domain(cd);
-                rc = -EINVAL;
-                goto out;
-            }
+        rcu_unlock_domain(cd);
+    }
+    break;
 
-            if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
-            {
-                /* Cannot add a gref to the physmap */
-                rcu_unlock_domain(cd);
-                rc = -EINVAL;
-                goto out;
-            }
+    case XENMEM_sharing_op_range_share:
+    {
+        unsigned long max_sgfn, max_cgfn;
+        struct domain *cd;
+
+        rc = -EINVAL;
+        if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||
+             mso.u.range._pad[2] )
+            goto out;
 
-            sgfn    = mso.u.share.source_gfn;
-            sh      = mso.u.share.source_handle;
-            cgfn    = mso.u.share.client_gfn;
+        /*
+         * We use opaque for the hypercall continuation value.
+         * Ideally the user sets this to 0 in the beginning but
+         * there is no good way of enforcing that here, so we just check
+         * that it's at least in range.
+         */
+        if ( mso.u.range.opaque &&
+             (mso.u.range.opaque < mso.u.range.first_gfn ||
+              mso.u.range.opaque > mso.u.range.last_gfn) )
+            goto out;
 
-            rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); 
+        if ( !mem_sharing_enabled(d) )
+            goto out;
 
+        rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
+                                               &cd);
+        if ( rc )
+            goto out;
+
+        /*
+         * We reuse XENMEM_sharing_op_share XSM check here as this is
+         * essentially the same concept repeated over multiple pages.
+         */
+        rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd,
+                                XENMEM_sharing_op_share);
+        if ( rc )
+        {
             rcu_unlock_domain(cd);
+            goto out;
         }
-        break;
 
-        case XENMEM_sharing_op_range_share:
+        if ( !mem_sharing_enabled(cd) )
         {
-            unsigned long max_sgfn, max_cgfn;
-            struct domain *cd;
-
+            rcu_unlock_domain(cd);
             rc = -EINVAL;
-            if ( mso.u.range._pad[0] || mso.u.range._pad[1] ||
-                 mso.u.range._pad[2] )
-                 goto out;
-
-            /*
-             * We use opaque for the hypercall continuation value.
-             * Ideally the user sets this to 0 in the beginning but
-             * there is no good way of enforcing that here, so we just check
-             * that it's at least in range.
-             */
-            if ( mso.u.range.opaque &&
-                 (mso.u.range.opaque < mso.u.range.first_gfn ||
-                  mso.u.range.opaque > mso.u.range.last_gfn) )
-                goto out;
-
-            if ( !mem_sharing_enabled(d) )
-                goto out;
-
-            rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain,
-                                                   &cd);
-            if ( rc )
-                goto out;
-
-            /*
-             * We reuse XENMEM_sharing_op_share XSM check here as this is
-             * essentially the same concept repeated over multiple pages.
-             */
-            rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd,
-                                    XENMEM_sharing_op_share);
-            if ( rc )
-            {
-                rcu_unlock_domain(cd);
-                goto out;
-            }
-
-            if ( !mem_sharing_enabled(cd) )
-            {
-                rcu_unlock_domain(cd);
-                rc = -EINVAL;
-                goto out;
-            }
-
-            /*
-             * Sanity check only, the client should keep the domains paused for
-             * the duration of this op.
-             */
-            if ( !atomic_read(&d->pause_count) ||
-                 !atomic_read(&cd->pause_count) )
-            {
-                rcu_unlock_domain(cd);
-                rc = -EINVAL;
-                goto out;
-            }
+            goto out;
+        }
 
-            max_sgfn = domain_get_maximum_gpfn(d);
-            max_cgfn = domain_get_maximum_gpfn(cd);
+        /*
+         * Sanity check only, the client should keep the domains paused for
+         * the duration of this op.
+         */
+        if ( !atomic_read(&d->pause_count) ||
+             !atomic_read(&cd->pause_count) )
+        {
+            rcu_unlock_domain(cd);
+            rc = -EINVAL;
+            goto out;
+        }
 
-            if ( max_sgfn < mso.u.range.first_gfn ||
-                 max_sgfn < mso.u.range.last_gfn ||
-                 max_cgfn < mso.u.range.first_gfn ||
-                 max_cgfn < mso.u.range.last_gfn )
-            {
-                rcu_unlock_domain(cd);
-                rc = -EINVAL;
-                goto out;
-            }
+        max_sgfn = domain_get_maximum_gpfn(d);
+        max_cgfn = domain_get_maximum_gpfn(cd);
 
-            rc = range_share(d, cd, &mso.u.range);
+        if ( max_sgfn < mso.u.range.first_gfn ||
+             max_sgfn < mso.u.range.last_gfn ||
+             max_cgfn < mso.u.range.first_gfn ||
+             max_cgfn < mso.u.range.last_gfn )
+        {
             rcu_unlock_domain(cd);
+            rc = -EINVAL;
+            goto out;
+        }
 
-            if ( rc > 0 )
-            {
-                if ( __copy_to_guest(arg, &mso, 1) )
-                    rc = -EFAULT;
-                else
-                    rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
-                                                       "lh", XENMEM_sharing_op,
-                                                       arg);
-            }
+        rc = range_share(d, cd, &mso.u.range);
+        rcu_unlock_domain(cd);
+
+        if ( rc > 0 )
+        {
+            if ( __copy_to_guest(arg, &mso, 1) )
+                rc = -EFAULT;
             else
-                mso.u.range.opaque = 0;
+                rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
+                                                   "lh", XENMEM_sharing_op,
+                                                   arg);
         }
-        break;
+        else
+            mso.u.range.opaque = 0;
+    }
+    break;
 
-        case XENMEM_sharing_op_debug_gfn:
-            rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn));
-            break;
+    case XENMEM_sharing_op_debug_gfn:
+        rc = debug_gfn(d, _gfn(mso.u.debug.u.gfn));
+        break;
 
-        case XENMEM_sharing_op_debug_gref:
-            rc = debug_gref(d, mso.u.debug.u.gref);
-            break;
+    case XENMEM_sharing_op_debug_gref:
+        rc = debug_gref(d, mso.u.debug.u.gref);
+        break;
 
-        default:
-            rc = -ENOSYS;
-            break;
+    default:
+        rc = -ENOSYS;
+        break;
     }
 
     if ( !rc && __copy_to_guest(arg, &mso, 1) )
@@ -1657,22 +1706,21 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
 
     /* Only HAP is supported */
     if ( !hap_enabled(d) )
-         return -ENODEV;
+        return -ENODEV;
 
-    switch(mec->op)
+    switch ( mec->op )
     {
-        case XEN_DOMCTL_MEM_SHARING_CONTROL:
-        {
-            rc = 0;
-            if ( unlikely(is_iommu_enabled(d) && mec->u.enable) )
-                rc = -EXDEV;
-            else
-                d->arch.hvm.mem_sharing_enabled = mec->u.enable;
-        }
+    case XEN_DOMCTL_MEM_SHARING_CONTROL:
+        rc = 0;
+        if ( unlikely(is_iommu_enabled(d) && mec->u.enable) )
+            rc = -EXDEV;
+        else
+            d->arch.hvm.mem_sharing_enabled = mec->u.enable;
         break;
 
-        default:
-            rc = -ENOSYS;
+    default:
+        rc = -ENOSYS;
+        break;
     }
 
     return rc;
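
As an aside (illustrative only, not part of the patch): the rmap hunks above keep the existing hysteresis scheme, where the reverse map converts from a linked list to a hash table once the sharing count reaches RMAP_HEAVY_SHARED_PAGE and only converts back once it drops to RMAP_LIGHT_SHARED_PAGE, a quarter of that, so the structure does not flip back and forth around a single boundary. A minimal stand-alone sketch of the pattern, with hypothetical names and thresholds:

    #include <stdbool.h>

    #define HEAVY_THRESHOLD 256                      /* list -> hash table */
    #define LIGHT_THRESHOLD (HEAVY_THRESHOLD >> 2)   /* hash table -> list */

    struct rmap_state {
        bool uses_hashtab;
        unsigned long count;
    };

    /*
     * Called after every add or remove.  The gap between the two thresholds
     * keeps the structure from converting constantly when the count hovers
     * around a single boundary.
     */
    static void rmap_rebalance(struct rmap_state *s)
    {
        if ( !s->uses_hashtab && s->count >= HEAVY_THRESHOLD )
            s->uses_hashtab = true;
        else if ( s->uses_hashtab && s->count <= LIGHT_THRESHOLD )
            s->uses_hashtab = false;
    }

    int main(void)
    {
        struct rmap_state s = { .uses_hashtab = false, .count = 0 };

        for ( s.count = 0; s.count <= HEAVY_THRESHOLD; s.count++ )
            rmap_rebalance(&s);   /* converts exactly once, at the threshold */

        return s.uses_hashtab ? 0 : 1;
    }
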
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ba126f790a..3119269073 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -511,8 +511,10 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
     if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        /* Try to unshare. If we fail, communicate ENOMEM without
-         * sleeping. */
+        /*
+         * Try to unshare. If we fail, communicate ENOMEM without
+         * sleeping.
+         */
         if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
             mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
@@ -892,15 +894,15 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                               &a, 0, NULL, NULL);
         if ( p2m_is_shared(ot) )
         {
-            /* Do an unshare to cleanly take care of all corner 
-             * cases. */
+            /* Do an unshare to cleanly take care of all corner cases. */
             int rc;
             rc = mem_sharing_unshare_page(p2m->domain,
                                           gfn_x(gfn_add(gfn, i)), 0);
             if ( rc )
             {
                 p2m_unlock(p2m);
-                /* NOTE: Should a guest domain bring this upon itself,
+                /*
+                 * NOTE: Should a guest domain bring this upon itself,
                  * there is not a whole lot we can do. We are buried
                  * deep in locks from most code paths by now. So, fail
                  * the call and don't try to sleep on a wait queue
@@ -909,8 +911,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                  * However, all current (changeset 3432abcf9380) code
                  * paths avoid this unsavoury situation. For now.
                  *
-                 * Foreign domains are okay to place an event as they 
-                 * won't go to sleep. */
+                 * Foreign domains are okay to place an event as they
+                 * won't go to sleep.
+                 */
                 (void)mem_sharing_notify_enomem(p2m->domain,
                                                 gfn_x(gfn_add(gfn, i)), false);
                 return rc;
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index db22468744..af2a1038b5 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -33,12 +33,14 @@
 #define MEM_SHARING_AUDIT 0
 #endif
 
-typedef uint64_t shr_handle_t; 
+typedef uint64_t shr_handle_t;
 
 typedef struct rmap_hashtab {
     struct list_head *bucket;
-    /* Overlaps with prev pointer of list_head in union below.
-     * Unlike the prev pointer, this can be NULL. */
+    /*
+     * Overlaps with prev pointer of list_head in union below.
+     * Unlike the prev pointer, this can be NULL.
+     */
     void *flag;
 } rmap_hashtab_t;
 
@@ -57,34 +59,33 @@ struct page_sharing_info
     };
 };
 
-#define sharing_supported(_d) \
-    (is_hvm_domain(_d) && paging_mode_hap(_d)) 
-
 unsigned int mem_sharing_get_nr_saved_mfns(void);
 unsigned int mem_sharing_get_nr_shared_mfns(void);
 
 #define MEM_SHARING_DESTROY_GFN       (1<<1)
 /* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */
 int __mem_sharing_unshare_page(struct domain *d,
-                             unsigned long gfn, 
-                             uint16_t flags);
+                               unsigned long gfn,
+                               uint16_t flags);
+
 static inline int mem_sharing_unshare_page(struct domain *d,
                                            unsigned long gfn,
                                            uint16_t flags)
 {
     int rc = __mem_sharing_unshare_page(d, gfn, flags);
-    BUG_ON( rc && (rc != -ENOMEM) );
+    BUG_ON(rc && (rc != -ENOMEM));
     return rc;
 }
 
-/* If called by a foreign domain, possible errors are
+/*
+ * If called by a foreign domain, possible errors are
  *   -EBUSY -> ring full
  *   -ENOSYS -> no ring to begin with
  * and the foreign mapper is responsible for retrying.
  *
- * If called by the guest vcpu itself and allow_sleep is set, may 
- * sleep on a wait queue, so the caller is responsible for not 
- * holding locks on entry. It may only fail with ENOSYS 
+ * If called by the guest vcpu itself and allow_sleep is set, may
+ * sleep on a wait queue, so the caller is responsible for not
+ * holding locks on entry. It may only fail with ENOSYS
  *
  * If called by the guest vcpu itself and allow_sleep is not set,
  * then it's the same as a foreign domain.
@@ -92,10 +93,11 @@ static inline int mem_sharing_unshare_page(struct domain *d,
 int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
                               bool allow_sleep);
 int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
-int mem_sharing_domctl(struct domain *d, 
+int mem_sharing_domctl(struct domain *d,
                        struct xen_domctl_mem_sharing_op *mec);
 
-/* Scans the p2m and relinquishes any shared pages, destroying 
+/*
+ * Scans the p2m and relinquishes any shared pages, destroying
  * those for which this domain holds the final reference.
  * Preemptible.
  */
@@ -107,19 +109,21 @@ static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
 {
     return 0;
 }
+
 static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
 {
     return 0;
 }
-static inline int mem_sharing_unshare_page(struct domain *d,
-                                           unsigned long gfn,
+
+static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
                                            uint16_t flags)
 {
     ASSERT_UNREACHABLE();
     return -EOPNOTSUPP;
 }
+
 static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
-                              bool allow_sleep)
+                                            bool allow_sleep)
 {
     ASSERT_UNREACHABLE();
     return -EOPNOTSUPP;
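
As an aside (illustrative only, not part of the patch): the header comments above spell out the calling convention: __mem_sharing_unshare_page() can only fail with -ENOMEM, the BUG_ON() wrapper enforces that, and a caller that may be holding locks must not sleep; instead it notifies a helper through the vm_event ring and lets the error propagate, as the p2m.c hunks earlier in this patch do. A sketch of that pattern (hypothetical caller name; assumes the usual Xen headers):

    static int example_unshare_gfn(struct domain *d, unsigned long gfn)
    {
        /* Only 0 or -ENOMEM can come back; anything else hits the BUG_ON(). */
        int rc = mem_sharing_unshare_page(d, gfn, 0);

        if ( rc < 0 )
            /*
             * We may be deep in locks here, so do not sleep: queue an ENOMEM
             * notification for a dom0 helper and let the error propagate.
             */
            mem_sharing_notify_enomem(d, gfn, false);

        return rc;
    }
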
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 

