
[xen master] x86/mm: don't open-code p2m_is_pod()



commit c80878c3203d6ddb80f6c5ca4f4c83a5fc042ebe
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Dec 6 14:13:03 2021 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Dec 6 14:13:03 2021 +0100

    x86/mm: don't open-code p2m_is_pod()
    
    Replace all comparisons against p2m_populate_on_demand (outside of
    switch() statements) with the designated predicate.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/p2m-ept.c      |  6 +++---
 xen/arch/x86/mm/p2m-pod.c      |  6 +++---
 xen/arch/x86/mm/p2m-pt.c       | 11 +++++------
 xen/arch/x86/mm/p2m.c          |  2 +-
 xen/arch/x86/mm/shadow/multi.c |  6 +++---
 5 files changed, 15 insertions(+), 16 deletions(-)
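
For context, p2m_is_pod() follows the same p2m_is_*() bitmask-predicate pattern as
p2m_is_ram() and the other type checks in Xen's p2m header. The sketch below is
illustrative only; the definitions are written out here as an assumption of the usual
pattern, not quoted from xen/include/asm-x86/p2m.h:

    /* Illustrative sketch of the p2m_is_*() predicate pattern: turn a
     * p2m_type_t value into a bit and test it against a type mask. */
    #define p2m_to_mask(t)  (1UL << (t))
    #define P2M_POD_TYPES   (p2m_to_mask(p2m_populate_on_demand))
    #define p2m_is_pod(t)   (p2m_to_mask(t) & P2M_POD_TYPES)

With such a helper, "e.sa_p2mt == p2m_populate_on_demand" becomes
"p2m_is_pod(e.sa_p2mt)", matching the existing p2m_is_ram()/p2m_is_paging() call
sites. switch() statements keep comparing against the enumerator directly, since
case labels need the constant itself rather than a predicate call.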

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1459f66c00..a4f5fc4b0d 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -344,7 +344,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
     {
         int rc;
 
-        if ( e.sa_p2mt == p2m_populate_on_demand )
+        if ( p2m_is_pod(e.sa_p2mt) )
             return GUEST_TABLE_POD_PAGE;
 
         if ( read_only )
@@ -1071,7 +1071,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
     index = gfn_remainder >> (i * EPT_TABLE_ORDER);
     ept_entry = table + index;
 
-    if ( ept_entry->sa_p2mt == p2m_populate_on_demand )
+    if ( p2m_is_pod(ept_entry->sa_p2mt) )
     {
         if ( !(q & P2M_ALLOC) )
         {
@@ -1478,7 +1478,7 @@ static void ept_dump_p2m_table(unsigned char key)
             ept_entry = table + (gfn_remainder >> order);
             if ( ret != GUEST_TABLE_MAP_FAILED && is_epte_valid(ept_entry) )
             {
-                if ( ept_entry->sa_p2mt == p2m_populate_on_demand )
+                if ( p2m_is_pod(ept_entry->sa_p2mt) )
                     printk("gfn: %13lx order: %2d PoD\n", gfn, order);
                 else
                     printk("gfn: %13lx order: %2d mfn: %13lx %c%c%c %c%c%c\n",
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 9c4c0caccb..afee09ab40 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -540,7 +540,7 @@ decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
 
         p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, &cur_order, NULL);
         n = 1UL << min(order, cur_order);
-        if ( t == p2m_populate_on_demand )
+        if ( p2m_is_pod(t) )
             pod += n;
         else if ( p2m_is_ram(t) )
             ram += n;
@@ -615,7 +615,7 @@ decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)
         if ( order < cur_order )
             cur_order = order;
         n = 1UL << cur_order;
-        if ( t == p2m_populate_on_demand )
+        if ( p2m_is_pod(t) )
         {
             /* This shouldn't be able to fail */
             if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,
@@ -1329,7 +1329,7 @@ mark_populate_on_demand(struct domain *d, unsigned long gfn_l,
 
         p2m->get_entry(p2m, gfn_add(gfn, i), &ot, &a, 0, &cur_order, NULL);
         n = 1UL << min(order, cur_order);
-        if ( ot == p2m_populate_on_demand )
+        if ( p2m_is_pod(ot) )
         {
             /* Count how many PoD entries we'll be replacing if successful */
             pod_count += n;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 09c99d78aa..ef3f8e02a4 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -841,7 +841,7 @@ pod_retry_l3:
         flags = l3e_get_flags(*l3e);
         if ( !(flags & _PAGE_PRESENT) )
         {
-            if ( p2m_flags_to_type(flags) == p2m_populate_on_demand )
+            if ( p2m_is_pod(p2m_flags_to_type(flags)) )
             {
                 if ( q & P2M_ALLOC )
                 {
@@ -884,7 +884,7 @@ pod_retry_l2:
     if ( !(flags & _PAGE_PRESENT) )
     {
         /* PoD: Try to populate a 2-meg chunk */
-        if ( p2m_flags_to_type(flags) == p2m_populate_on_demand )
+        if ( p2m_is_pod(p2m_flags_to_type(flags)) )
         {
             if ( q & P2M_ALLOC ) {
                 if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_2M) )
@@ -923,7 +923,7 @@ pod_retry_l1:
     if ( !(flags & _PAGE_PRESENT) && !p2m_is_paging(l1t) )
     {
         /* PoD: Try to populate */
-        if ( l1t == p2m_populate_on_demand )
+        if ( p2m_is_pod(l1t) )
         {
             if ( q & P2M_ALLOC ) {
                 if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) )
@@ -1094,8 +1094,7 @@ static long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                     if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                     {
                         if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE)
-                             && ( p2m_flags_to_type(l2e_get_flags(l2e[i2]))
-                                  == p2m_populate_on_demand ) )
+                             && p2m_is_pod(p2m_flags_to_type(l2e_get_flags(l2e[i2]))) )
                             entry_count+=SUPERPAGE_PAGES;
                         gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                         continue;
@@ -1132,7 +1131,7 @@ static long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                         type = p2m_flags_to_type(l1e_get_flags(l1e[i1]));
                         if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                         {
-                            if ( type == p2m_populate_on_demand )
+                            if ( p2m_is_pod(type) )
                                 entry_count++;
                             continue;
                         }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index def1695cf0..1d5a87a969 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -992,7 +992,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
             ASSERT(mfn_valid(omfn));
             set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
         }
-        else if ( ot == p2m_populate_on_demand )
+        else if ( p2m_is_pod(ot) )
         {
             /* Count how man PoD entries we'll be replacing if successful */
             pod_count++;
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index be617281fc..bddef53163 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1478,7 +1478,7 @@ static int validate_gl4e(struct vcpu *v, void *new_ge, mfn_t sl4mfn, void *se)
         mfn_t gl3mfn = get_gfn_query_unlocked(d, gfn_x(gl3gfn), &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl3mfn = get_shadow_status(d, gl3mfn, SH_type_l3_shadow);
-        else if ( p2mt != p2m_populate_on_demand )
+        else if ( !p2m_is_pod(p2mt) )
             result |= SHADOW_SET_ERROR;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
@@ -1537,7 +1537,7 @@ static int validate_gl3e(struct vcpu *v, void *new_ge, mfn_t sl3mfn, void *se)
         mfn_t gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
         if ( p2m_is_ram(p2mt) )
             sl2mfn = get_shadow_status(d, gl2mfn, SH_type_l2_shadow);
-        else if ( p2mt != p2m_populate_on_demand )
+        else if ( !p2m_is_pod(p2mt) )
             result |= SHADOW_SET_ERROR;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
@@ -1588,7 +1588,7 @@ static int validate_gl2e(struct vcpu *v, void *new_ge, mfn_t sl2mfn, void *se)
             mfn_t gl1mfn = get_gfn_query_unlocked(d, gfn_x(gl1gfn), &p2mt);
             if ( p2m_is_ram(p2mt) )
                 sl1mfn = get_shadow_status(d, gl1mfn, SH_type_l1_shadow);
-            else if ( p2mt != p2m_populate_on_demand )
+            else if ( !p2m_is_pod(p2mt) )
                 result |= SHADOW_SET_ERROR;
         }
     }
--
generated by git-patchbot for /home/xen/git/xen.git#master