[PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Mon, 6 Dec 2021 14:25:35 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>
  • Delivery-date: Mon, 06 Dec 2021 13:25:41 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Top-level table and intermediate table entries are explicitly set to
INVALID_MFN when unallocated. There is therefore no need to use the more
expensive mfn_valid() for these checks: comparing against the sentinel
with mfn_eq() is sufficient.

Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: New.
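
For readers less familiar with the MM type system: every hunk below
replaces a validity/range check with a plain equality test against the
all-ones sentinel. The following is a simplified, self-contained sketch
of the difference, not the actual Xen definitions; in particular the
real x86 mfn_valid() also consults the pdx group-valid bitmap, mfn_t is
Xen's typesafe wrapper, and max_page here is just a made-up stand-in.

/* Simplified sketch -- not the actual Xen code. */
#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long mfn; } mfn_t;   /* typesafe MFN wrapper */

#define _mfn(m)      ((mfn_t){ (m) })
#define mfn_x(m)     ((m).mfn)
#define INVALID_MFN  _mfn(~0UL)                /* sentinel: all bits set */

static unsigned long max_page = 0x100000;      /* hypothetical host size */

/* Range check: loads max_page (and, in Xen, tests a global bitmap). */
static bool mfn_valid(mfn_t m)
{
    return mfn_x(m) < max_page;
}

/* Sentinel check: a single compare against an immediate. */
static bool mfn_eq(mfn_t x, mfn_t y)
{
    return mfn_x(x) == mfn_x(y);
}

int main(void)
{
    mfn_t entry = INVALID_MFN;                 /* "unallocated" node entry */

    /* Unallocated entry: fails the range check, matches the sentinel. */
    printf("valid? %d  sentinel? %d\n",
           mfn_valid(entry), mfn_eq(entry, INVALID_MFN));

    entry = _mfn(0x1234);                      /* "allocated" node entry */
    printf("valid? %d  sentinel? %d\n",
           mfn_valid(entry), mfn_eq(entry, INVALID_MFN));

    return 0;
}

Because the allocation and teardown paths guarantee that unallocated
entries always hold exactly INVALID_MFN, the equality test is both
cheaper and no less precise than the range check it replaces.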

--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -74,7 +74,7 @@ static mfn_t paging_new_log_dirty_leaf(s
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
 
-    if ( mfn_valid(mfn) )
+    if ( !mfn_eq(mfn, INVALID_MFN) )
         clear_domain_page(mfn);
 
     return mfn;
@@ -84,7 +84,8 @@ static mfn_t paging_new_log_dirty_leaf(s
 static mfn_t paging_new_log_dirty_node(struct domain *d)
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
-    if ( mfn_valid(mfn) )
+
+    if ( !mfn_eq(mfn, INVALID_MFN) )
     {
         int i;
         mfn_t *node = map_domain_page(mfn);
@@ -98,7 +99,7 @@ static mfn_t paging_new_log_dirty_node(s
 /* get the top of the log-dirty bitmap trie */
 static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
 {
-    if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
+    if ( likely(!mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
         return map_domain_page(d->arch.paging.log_dirty.top);
     return NULL;
 }
@@ -116,7 +117,7 @@ static int paging_free_log_dirty_bitmap(
 
     paging_lock(d);
 
-    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+    if ( mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN) )
     {
         paging_unlock(d);
         return 0;
@@ -143,20 +144,20 @@ static int paging_free_log_dirty_bitmap(
 
     for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
     {
-        if ( !mfn_valid(l4[i4]) )
+        if ( mfn_eq(l4[i4], INVALID_MFN) )
             continue;
 
         l3 = map_domain_page(l4[i4]);
 
         for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
         {
-            if ( !mfn_valid(l3[i3]) )
+            if ( mfn_eq(l3[i3], INVALID_MFN) )
                 continue;
 
             l2 = map_domain_page(l3[i3]);
 
             for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
-                if ( mfn_valid(l2[i2]) )
+                if ( !mfn_eq(l2[i2], INVALID_MFN) )
                     paging_free_log_dirty_page(d, l2[i2]);
 
             unmap_domain_page(l2);
@@ -288,35 +289,35 @@ void paging_mark_pfn_dirty(struct domain
     /* Recursive: this is called from inside the shadow code */
     paging_lock_recursive(d);
 
-    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) 
+    if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
     {
          d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d);
-         if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
+         if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
              goto out;
     }
 
     l4 = paging_map_log_dirty_bitmap(d);
     mfn = l4[i4];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l4[i4] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l4);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l3 = map_domain_page(mfn);
     mfn = l3[i3];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l3[i3] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l3);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l2 = map_domain_page(mfn);
     mfn = l2[i2];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l2[i2] = mfn = paging_new_log_dirty_leaf(d);
     unmap_domain_page(l2);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l1 = map_domain_page(mfn);
@@ -370,25 +371,25 @@ bool paging_mfn_is_dirty(const struct do
         return false;
 
     mfn = d->arch.paging.log_dirty.top;
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l4 = map_domain_page(mfn);
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l3 = map_domain_page(mfn);
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l2 = map_domain_page(mfn);
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l1 = map_domain_page(mfn);
@@ -477,17 +478,18 @@ static int paging_log_dirty_op(struct do
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
-        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
+        l3 = ((l4 && !mfn_eq(l4[i4], INVALID_MFN)) ?
+              map_domain_page(l4[i4]) : NULL);
         for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
         {
-            l2 = ((l3 && mfn_valid(l3[i3])) ?
+            l2 = ((l3 && !mfn_eq(l3[i3], INVALID_MFN)) ?
                   map_domain_page(l3[i3]) : NULL);
             for ( i2 = 0;
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
-                l1 = ((l2 && mfn_valid(l2[i2])) ?
+                l1 = ((l2 && !mfn_eq(l2[i2], INVALID_MFN)) ?
                       map_domain_page(l2[i2]) : NULL);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);