
[Xen-changelog] [xen master] mm: add 'is_special_page' inline function...



commit 0537d246f8db3ac0a1df2ce653b07e85cd887962
Author:     Paul Durrant <paul@xxxxxxx>
AuthorDate: Tue Mar 24 17:40:50 2020 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Tue Mar 24 17:40:50 2020 +0100

    mm: add 'is_special_page' inline function...
    
    ... to cover xenheap and PGC_extra pages.
    
    PGC_extra pages are intended to hold data structures that are associated
    with a domain and may be mapped by that domain. They should not be treated
    as 'normal' guest pages (i.e. RAM or page tables). Hence, in many cases
    where code currently tests is_xen_heap_page() it should also check for
    the PGC_extra bit in 'count_info'.
    
    This patch therefore defines is_special_page() to cover both cases and
    converts tests of is_xen_heap_page() (or open coded tests of PGC_xen_heap)
    to is_special_page() where the page is assigned to a domain.
    
    Signed-off-by: Paul Durrant <paul@xxxxxxx>
    Acked-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Julien Grall <julien@xxxxxxx>
---
 xen/arch/x86/domctl.c           |  2 +-
 xen/arch/x86/mm.c               | 13 ++++++-------
 xen/arch/x86/mm/altp2m.c        |  2 +-
 xen/arch/x86/mm/mem_sharing.c   |  3 +--
 xen/arch/x86/mm/p2m-pod.c       | 12 +++++++-----
 xen/arch/x86/mm/p2m.c           |  4 ++--
 xen/arch/x86/mm/shadow/common.c | 13 ++++++++-----
 xen/arch/x86/mm/shadow/multi.c  |  3 ++-
 xen/arch/x86/tboot.c            |  4 ++--
 xen/include/xen/mm.h            |  5 +++++
 10 files changed, 35 insertions(+), 26 deletions(-)
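
The new helper, added to xen/include/xen/mm.h in the final hunk below, lets call sites collapse an open-coded pair of checks into a single predicate. A minimal sketch of the before/after caller pattern, modelled on the steal_page() hunk; the wrapper name below is illustrative only and not part of the patch:

    #include <xen/mm.h>

    /* Illustrative wrapper, not Xen code: shows the conversion pattern. */
    static bool page_is_special_example(const struct page_info *page)
    {
        /*
         * Before: call sites open-coded both conditions, e.g. in steal_page():
         *     is_xen_heap_page(page) || (page->count_info & PGC_extra)
         *
         * After: one predicate covers both xenheap and PGC_extra pages.
         */
        return is_special_page(page);
    }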

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed86762fa6..add70126b9 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -394,7 +394,7 @@ long arch_do_domctl(
             page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC);
 
             if ( unlikely(!page) ||
-                 unlikely(is_xen_heap_page(page)) )
+                 unlikely(is_special_page(page)) )
             {
                 if ( unlikely(p2m_is_broken(t)) )
                     type = XEN_DOMCTL_PFINFO_BROKEN;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 62507ca651..2fac67ad57 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1014,7 +1014,7 @@ get_page_from_l1e(
         unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
         int err;
 
-        if ( is_xen_heap_page(page) )
+        if ( is_special_page(page) )
         {
             if ( write )
                 put_page_type(page);
@@ -2447,7 +2447,7 @@ static int cleanup_page_mappings(struct page_info *page)
     {
         page->count_info &= ~PGC_cacheattr_mask;
 
-        BUG_ON(is_xen_heap_page(page));
+        BUG_ON(is_special_page(page));
 
         rc = update_xen_mappings(mfn, 0);
     }
@@ -2477,7 +2477,7 @@ static int cleanup_page_mappings(struct page_info *page)
                 rc = rc2;
         }
 
-        if ( likely(!is_xen_heap_page(page)) )
+        if ( likely(!is_special_page(page)) )
         {
             ASSERT((page->u.inuse.type_info &
                     (PGT_type_mask | PGT_count_mask)) == PGT_writable_page);
@@ -4216,8 +4216,7 @@ int steal_page(
     if ( !(owner = page_get_owner_and_reference(page)) )
         goto fail;
 
-    if ( owner != d || is_xen_heap_page(page) ||
-         (page->count_info & PGC_extra) )
+    if ( owner != d || is_special_page(page) )
         goto fail_put;
 
     /*
@@ -4580,8 +4579,8 @@ int xenmem_add_to_physmap_one(
     prev_mfn = get_gfn(d, gfn_x(gpfn), &p2mt);
     if ( mfn_valid(prev_mfn) )
     {
-        if ( is_xen_heap_mfn(prev_mfn) )
-            /* Xen heap frames are simply unhooked from this phys slot. */
+        if ( is_special_page(mfn_to_page(prev_mfn)) )
+            /* Special pages are simply unhooked from this phys slot. */
             rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
         else
             /* Normal domain memory is freed, to avoid leaking memory. */
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 50768f2547..c091b03ea3 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -77,7 +77,7 @@ int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn)
      * pageable() predicate for this, due to it having the same properties
      * that we want.
      */
-    if ( !p2m_is_pageable(p2mt) || is_xen_heap_page(pg) )
+    if ( !p2m_is_pageable(p2mt) || is_special_page(pg) )
     {
         rc = -EINVAL;
         goto err;
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 3835bc928f..f49f27a3ef 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -840,9 +840,8 @@ static int nominate_page(struct domain *d, gfn_t gfn,
     if ( !p2m_is_sharable(p2mt) )
         goto out;
 
-    /* Skip xen heap pages */
     page = mfn_to_page(mfn);
-    if ( !page || is_xen_heap_page(page) )
+    if ( !page || is_special_page(page) )
         goto out;
 
     /* Check if there are mem_access/remapped altp2m entries for this page */
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 2a7b8c117b..36bc471e7f 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -749,8 +749,9 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn)
 
         n = 1UL << min(cur_order, SUPERPAGE_ORDER + 0U);
         for ( k = 0, page = mfn_to_page(mfn); k < n; ++k, ++page )
-            if ( !(page->count_info & PGC_allocated) ||
-                 (page->count_info & (PGC_page_table | PGC_xen_heap)) ||
+            if ( is_special_page(page) ||
+                 !(page->count_info & PGC_allocated) ||
+                 (page->count_info & PGC_page_table) ||
                  (page->count_info & PGC_count_mask) > max_ref )
                 goto out;
     }
@@ -883,11 +884,12 @@ p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count
         pg = mfn_to_page(mfns[i]);
 
         /*
-         * If this is ram, and not a pagetable or from the xen heap, and
+         * If this is ram, and not a pagetable or a special page, and
          * probably not mapped elsewhere, map it; otherwise, skip.
          */
-        if ( p2m_is_ram(types[i]) && (pg->count_info & PGC_allocated) &&
-             !(pg->count_info & (PGC_page_table | PGC_xen_heap)) &&
+        if ( !is_special_page(pg) && p2m_is_ram(types[i]) &&
+             (pg->count_info & PGC_allocated) &&
+             !(pg->count_info & PGC_page_table) &&
              ((pg->count_info & PGC_count_mask) <= max_ref) )
             map[i] = map_domain_page(mfns[i]);
         else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9f51370327..d93c418bcf 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2983,8 +2983,8 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
     prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
     if ( mfn_valid(prev_mfn) )
     {
-        if ( is_xen_heap_mfn(prev_mfn) )
-            /* Xen heap frames are simply unhooked from this phys slot */
+        if ( is_special_page(mfn_to_page(prev_mfn)) )
+            /* Special pages are simply unhooked from this phys slot */
             rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
         else
             /* Normal domain memory is freed, to avoid leaking memory. */
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 121ddf1255..75dd414a6e 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2087,19 +2087,22 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn)
          * The qemu helper process has an untyped mapping of this dom's RAM
          * and the HVM restore program takes another.
          * Also allow one typed refcount for
-         * - Xen heap pages, to match share_xen_page_with_guest(),
-         * - ioreq server pages, to match prepare_ring_for_helper().
+         * - special pages, which are explicitly referenced and mapped by
+         *   Xen.
+         * - ioreq server pages, which may be special pages or normal
+         *   guest pages with an extra reference taken by
+         *   prepare_ring_for_helper().
          */
         if ( !(shadow_mode_external(d)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
-                   == (is_xen_heap_page(page) ||
+                   == (is_special_page(page) ||
                        (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
             printk(XENLOG_G_ERR "can't find all mappings of mfn %"PRI_mfn
-                   " (gfn %"PRI_gfn"): c=%lx t=%lx x=%d i=%d\n",
+                   " (gfn %"PRI_gfn"): c=%lx t=%lx s=%d i=%d\n",
                    mfn_x(gmfn), gfn_x(gfn),
                    page->count_info, page->u.inuse.type_info,
-                   !!is_xen_heap_page(page),
+                   is_special_page(page),
                    (is_hvm_domain(d) && is_ioreq_server_page(d, page)));
     }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index b6afc0fba4..f6b1628742 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -559,7 +559,8 @@ _sh_propagate(struct vcpu *v,
      * caching attributes in the shadows to match what was asked for.
      */
     if ( (level == 1) && is_hvm_domain(d) &&
-         !is_xen_heap_mfn(target_mfn) )
+         (!mfn_valid(target_mfn) ||
+          !is_special_page(mfn_to_page(target_mfn))) )
     {
         int type;
 
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index ce85a710dd..102c3cd203 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -189,7 +189,7 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
 
         if ( !mfn_valid(_mfn(mfn)) )
             continue;
-        if ( is_page_in_use(page) && !is_xen_heap_page(page) )
+        if ( is_page_in_use(page) && !is_special_page(page) )
         {
             if ( page->count_info & PGC_page_table )
             {
@@ -289,7 +289,7 @@ static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
                               + 3 * PAGE_SIZE)) )
             continue; /* skip tboot and its page tables */
 
-        if ( is_page_in_use(page) && is_xen_heap_page(page) )
+        if ( is_page_in_use(page) && is_special_page(page) )
         {
             void *pg;
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index a163c201e2..9b62087be1 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -285,6 +285,11 @@ extern struct domain *dom_cow;
 
 #include <asm/mm.h>
 
+static inline bool is_special_page(const struct page_info *page)
+{
+    return is_xen_heap_page(page) || (page->count_info & PGC_extra);
+}
+
 #ifndef page_list_entry
 struct page_list_head
 {
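
Note that is_special_page() takes a struct page_info pointer, so call sites starting from an MFN (as in the shadow/multi.c hunk above) must confirm the MFN is valid before converting it with mfn_to_page(). A hedged sketch of that guard, mirroring the check added in _sh_propagate(); the helper name is illustrative only:

    /* Illustrative helper: combines the mfn_valid() guard with the new predicate. */
    static bool mfn_is_special_example(mfn_t mfn)
    {
        return mfn_valid(mfn) && is_special_page(mfn_to_page(mfn));
    }
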
--
generated by git-patchbot for /home/xen/git/xen.git#master
