[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v2 3/3] xen/mm: improve freeing of partially scrubbed pages


  • To: xen-devel@xxxxxxxxxxxxxxxxxxxx
  • From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
  • Date: Thu, 26 Mar 2026 09:51:45 +0100
  • Arc-authentication-results: i=1; mx.microsoft.com 1; spf=pass smtp.mailfrom=citrix.com; dmarc=pass action=none header.from=citrix.com; dkim=pass header.d=citrix.com; arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector10001; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1; bh=NRuxAg7+DFWaBUvv3afEF63Z3k/Yz3QGkGiV9EBbvyo=; b=s5+9rhvNzvmH1pLCTsvebd9eznNYEASebndo8oYJ9M+0SylPkUwlMP0Jr20F8XlHh/HMTbZCEcKmljZszi4o17kp8PW2neywoK+8OJqbaHO3cJ5T3gE2ghc80sazKvGLVFrbJvzDyIUQfR9NiI5jCNGZZi6EMrCbIXoExStrcETaJYzN7OhfBq5E42FGOj9GJUHe4CWEQsqZyIlOZkQtpZ3/B5kary7zaY16WSzocF1RMWe1HqVTTT7dU9DjT4olBsJwe4VMBjwbMZhuRzl04Ilz9x5AteHxUfxmlvBtN3u4zH4Ld0qAo+J9gB43eUiXF1MAiVnGxYpRSeX5kQ6p+g==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector10001; d=microsoft.com; cv=none; b=BAl/0Wo4RO6Dp5ROoPlzt2DYDukI7FQdftIE2AUZlW+rG8i+XLNuwpd/zp//KogPrEjEsTpvfwiZqM0lfurctsITp1FsN4FJPaZQI2/D8dEad+WJpbzUBNSagoXkOrVDzM95qxC7w0fM38LzfZp6FdoXBeDdTr2z0t3W40Db6skaxshgJDZfkhRllrnyRNa16jS3RabdMFTMw+ZE3HxIMLkiKFQ7HG+FRZDAKKDxapJUsS9yQIzluCmxJhR4BHqZ/dbY2ZLf4E8YjOE0mYiRxYpHs/sDlRHiiwwJvTY1SCDvIfUdmTpTEfiwoIDSpqocQLEHxQh1TFiSipB2DeKOjg==
  • Authentication-results: eu.smtp.expurgate.cloud; dkim=pass header.s=selector1 header.d=citrix.com header.i="@citrix.com" header.h="From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck"
  • Authentication-results: dkim=none (message not signed) header.d=none;dmarc=none action=none header.from=citrix.com;
  • Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Michal Orzel <michal.orzel@xxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>
  • Delivery-date: Thu, 26 Mar 2026 08:52:10 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

When freeing possibly partially scrubbed pages in populate_physmap() the
whole page is marked as dirty, but that's not fully accurate.  Since the
PGC_need_scrub bits are preserved for the populate_physmap() allocation we
can use them when freeing to detect which pages need scrubbing instead of
marking the whole page as dirty.

This requires exposing free_heap_pages() globally, and switching
populate_physmap() to use it instead of free_domheap_pages().

Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Jan: I'm not sure if that's what you suggested in the review of v1.  I've
added your Suggested-by but I can drop it if that's not what you were
thinking of.
---
 xen/common/memory.c     |  6 +++---
 xen/common/page_alloc.c | 16 +++++++++++++---
 xen/include/xen/mm.h    |  6 ++++++
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 1ad4b51c5b02..68eef8291571 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -177,7 +177,7 @@ static void stash_allocation(struct domain *d, struct page_info *page,
      * interface is designed to be used for single-threaded domain creation.
      */
     if ( d->pending_scrub || d->is_dying )
-        free_domheap_pages(page, order);
+        free_heap_pages(page, order, false);
     else
     {
         d->pending_scrub_index = scrub_index;
@@ -210,7 +210,7 @@ static struct page_info *get_stashed_allocation(struct domain *d,
             *scrub_index = d->pending_scrub_index;
         }
         else
-            free_domheap_pages(d->pending_scrub, d->pending_scrub_order);
+            free_heap_pages(d->pending_scrub, d->pending_scrub_order, false);
 
         /*
          * The caller now owns the page or it has been freed, clear stashed
@@ -391,7 +391,7 @@ static void populate_physmap(struct memop_args *a)
 
                     if ( assign_page(page, a->extent_order, d, memflags) )
                     {
-                        free_domheap_pages(page, a->extent_order);
+                        free_heap_pages(page, a->extent_order, false);
                         goto out;
                     }
                 }
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index b1edef87124f..8fc9b5a27f1b 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1529,13 +1529,13 @@ static bool mark_page_free(struct page_info *pg, mfn_t mfn)
 static void free_color_heap_page(struct page_info *pg, bool need_scrub);
 
 /* Free 2^@order set of pages. */
-static void free_heap_pages(
-    struct page_info *pg, unsigned int order, bool need_scrub)
+void free_heap_pages(struct page_info *pg, unsigned int order, bool need_scrub)
 {
     unsigned long mask;
     mfn_t mfn = page_to_mfn(pg);
     unsigned int i, node = mfn_to_nid(mfn);
     unsigned int zone = page_to_zone(pg);
+    unsigned int first_dirty = INVALID_DIRTY_IDX, dirty_cnt = 0;
     bool pg_offlined = false;
 
     ASSERT(order <= MAX_ORDER);
@@ -1552,6 +1552,13 @@ static void free_heap_pages(
             pg[i].count_info |= PGC_need_scrub;
             poison_one_page(&pg[i]);
         }
+        else if ( test_bit(_PGC_need_scrub, &pg[i].count_info) )
+        {
+            /* The caller might have returned pages pending scrub. */
+            if ( first_dirty == INVALID_DIRTY_IDX )
+                first_dirty = i;
+            dirty_cnt++;
+        }
 
         if ( pg->count_info & PGC_colored )
         {
@@ -1571,7 +1578,10 @@ static void free_heap_pages(
         pg->u.free.first_dirty = 0;
     }
     else
-        pg->u.free.first_dirty = INVALID_DIRTY_IDX;
+    {
+        node_need_scrub[node] += dirty_cnt;
+        pg->u.free.first_dirty = first_dirty;
+    }
 
     /* Merge chunks as far as possible. */
     while ( order < MAX_ORDER )
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index b80bec00c124..0b192caa07bc 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -153,6 +153,12 @@ unsigned long avail_node_heap_pages(unsigned int nodeid);
 } while ( false )
 #define FREE_DOMHEAP_PAGE(p) FREE_DOMHEAP_PAGES(p, 0)
 
+/*
+ * Most callers should use free_{xen,dom}heap_pages() instead of directly
+ * calling free_heap_pages().
+ */
+void free_heap_pages(struct page_info *pg, unsigned int order, bool need_scrub);
+
 void scrub_one_page(const struct page_info *pg, bool cold);
 
 int online_page(mfn_t mfn, uint32_t *status);
-- 
2.51.0




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.