[PATCH v2 02/13] x86/shadow: drop redundant present bit checks from FOREACH_PRESENT_L<N>E() "bodies"


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Thu, 30 Mar 2023 13:26:02 +0200
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Tim Deegan <tim@xxxxxxx>
  • Delivery-date: Thu, 30 Mar 2023 11:26:17 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

FOREACH_PRESENT_L<N>E(), as their names (now) say, already invoke the
"body" only when the present bit is set; no need to re-do the check.

While there also
- stop open-coding mfn_to_maddr() in code being touched (re-indented)
  anyway,
- stop open-coding mfn_eq() in code being touched or in adjacent code (see
  the sketch after this list),
- drop local variables when they're no longer used at least twice.
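For the mfn_to_maddr() / mfn_eq() items, the helpers being switched to amount
to the following (paraphrased here for reference; the in-tree definitions are
authoritative):

/* Typesafe MFN comparison, replacing open-coded mfn_x(a) == mfn_x(b): */
static inline bool mfn_eq(mfn_t x, mfn_t y)
{
    return mfn_x(x) == mfn_x(y);
}

/* MFN -> machine address, replacing the open-coded
 * ((paddr_t)mfn_x(mfn)) << PAGE_SHIFT form: */
#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn))
/* ... where pfn_to_paddr(pfn) expands to ((paddr_t)(pfn) << PAGE_SHIFT). */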

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v2: Re-base over new earlier patch. More mfn_eq().

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1289,12 +1289,8 @@ void sh_destroy_l4_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl4mfn = smfn;
     FOREACH_PRESENT_L4E(sl4mfn, sl4e, NULL, 0, d, {
-        if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
-        {
-            sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
-                       (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
-                       | ((unsigned long)sl4e & ~PAGE_MASK));
-        }
+        sh_put_ref(d, shadow_l4e_get_mfn(*sl4e),
+                   mfn_to_maddr(sl4mfn) | ((unsigned long)sl4e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1320,10 +1316,8 @@ void sh_destroy_l3_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl3mfn = smfn;
     FOREACH_PRESENT_L3E(sl3mfn, sl3e, NULL, 0, {
-        if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
-            sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
-                        (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
-                        | ((unsigned long)sl3e & ~PAGE_MASK));
+        sh_put_ref(d, shadow_l3e_get_mfn(*sl3e),
+                   mfn_to_maddr(sl3mfn) | ((unsigned long)sl3e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1352,10 +1346,8 @@ void sh_destroy_l2_shadow(struct domain
     /* Decrement refcounts of all the old entries */
     sl2mfn = smfn;
     FOREACH_PRESENT_L2E(sl2mfn, sl2e, NULL, 0, d, {
-        if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
-            sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
-                        (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
-                        | ((unsigned long)sl2e & ~PAGE_MASK));
+        sh_put_ref(d, shadow_l2e_get_mfn(*sl2e),
+                   mfn_to_maddr(sl2mfn) | ((unsigned long)sl2e & ~PAGE_MASK));
     });
 
     /* Put the memory back in the pool */
@@ -1390,11 +1382,10 @@ void sh_destroy_l1_shadow(struct domain
         /* Decrement refcounts of all the old entries */
         mfn_t sl1mfn = smfn;
         FOREACH_PRESENT_L1E(sl1mfn, sl1e, NULL, 0, {
-            unsigned int sl1f = shadow_l1e_get_flags(*sl1e);
-
-            if ( (sl1f & _PAGE_PRESENT) && !sh_l1e_is_magic(*sl1e) )
+            if ( !sh_l1e_is_magic(*sl1e) )
             {
-                shadow_vram_put_mfn(shadow_l1e_get_mfn(*sl1e), sl1f,
+                shadow_vram_put_mfn(shadow_l1e_get_mfn(*sl1e),
+                                    shadow_l1e_get_flags(*sl1e),
                                     sl1mfn, sl1e, d);
                 shadow_put_page_from_l1e(*sl1e, d);
             }
@@ -3558,7 +3549,6 @@ int cf_check sh_rm_write_access_from_l1(
 {
     shadow_l1e_t *sl1e;
     int done = 0;
-    int flags;
 #if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
     struct vcpu *curr = current;
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
@@ -3566,10 +3556,8 @@ int cf_check sh_rm_write_access_from_l1(
 
     FOREACH_PRESENT_L1E(sl1mfn, sl1e, NULL, done,
     {
-        flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT)
-             && (flags & _PAGE_RW)
-             && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
+        if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_RW) &&
+             mfn_eq(shadow_l1e_get_mfn(*sl1e), readonly_mfn) )
         {
             shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
 
@@ -3595,13 +3583,10 @@ int cf_check sh_rm_mappings_from_l1(
 {
     shadow_l1e_t *sl1e;
     int done = 0;
-    int flags;
 
     FOREACH_PRESENT_L1E(sl1mfn, sl1e, NULL, done,
     {
-        flags = shadow_l1e_get_flags(*sl1e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
+        if ( mfn_eq(shadow_l1e_get_mfn(*sl1e), target_mfn) )
         {
             shadow_set_l1e(d, sl1e, shadow_l1e_empty(), p2m_invalid, sl1mfn);
             if ( sh_check_page_has_no_refs(mfn_to_page(target_mfn)) )
@@ -3646,13 +3631,10 @@ int cf_check sh_remove_l1_shadow(struct
 {
     shadow_l2e_t *sl2e;
     int done = 0;
-    int flags;
 
     FOREACH_PRESENT_L2E(sl2mfn, sl2e, NULL, done, d,
     {
-        flags = shadow_l2e_get_flags(*sl2e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
+        if ( mfn_eq(shadow_l2e_get_mfn(*sl2e), sl1mfn) )
         {
             shadow_set_l2e(d, sl2e, shadow_l2e_empty(), sl2mfn);
             if ( mfn_to_page(sl1mfn)->u.sh.type == 0 )
@@ -3669,13 +3651,10 @@ int cf_check sh_remove_l2_shadow(struct
 {
     shadow_l3e_t *sl3e;
     int done = 0;
-    int flags;
 
     FOREACH_PRESENT_L3E(sl3mfn, sl3e, NULL, done,
     {
-        flags = shadow_l3e_get_flags(*sl3e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
+        if ( mfn_eq(shadow_l3e_get_mfn(*sl3e), sl2mfn) )
         {
             shadow_set_l3e(d, sl3e, shadow_l3e_empty(), sl3mfn);
             if ( mfn_to_page(sl2mfn)->u.sh.type == 0 )
@@ -3691,13 +3670,10 @@ int cf_check sh_remove_l3_shadow(struct
 {
     shadow_l4e_t *sl4e;
     int done = 0;
-    int flags;
 
     FOREACH_PRESENT_L4E(sl4mfn, sl4e, NULL, done, d,
     {
-        flags = shadow_l4e_get_flags(*sl4e);
-        if ( (flags & _PAGE_PRESENT)
-             && (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
+        if ( mfn_eq(shadow_l4e_get_mfn(*sl4e), sl3mfn) )
         {
             shadow_set_l4e(d, sl4e, shadow_l4e_empty(), sl4mfn);
             if ( mfn_to_page(sl3mfn)->u.sh.type == 0 )
