
[PATCH v5 5/7] xen: re-define assign_pages and introduce assign_page


  • To: <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <sstabellini@xxxxxxxxxx>, <julien@xxxxxxx>
  • From: Penny Zheng <penny.zheng@xxxxxxx>
  • Date: Tue, 24 Aug 2021 09:50:43 +0000
  • Cc: <Bertrand.Marquis@xxxxxxx>, <Penny.Zheng@xxxxxxx>, <Wei.Chen@xxxxxxx>, <nd@xxxxxxx>
  • Delivery-date: Tue, 24 Aug 2021 09:51:42 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Nodisclaimer: true

In order to avoid the awkward count-to-order conversion when the number of pages
is not a power of two, this commit re-defines assign_pages() to take a number of
pages (nr) and introduces assign_page() to retain the original single-order
interface. A brief caller-side sketch is included after the v5 note below.

Signed-off-by: Penny Zheng <penny.zheng@xxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v5 change:
- Keep both functions, assign_pages() and assign_page(), with a similar
parameter arrangement
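
For illustration only (not part of the patch): a minimal caller-side sketch of
the two interfaces, assuming the new prototypes added to xen/mm.h by this
series. The helper name assign_contiguous_region() is hypothetical, not an
existing Xen function.

#include <xen/mm.h>
#include <xen/sched.h>

/*
 * Hypothetical helper: hand an arbitrary (not necessarily power-of-two)
 * run of nr contiguous pages starting at smfn to domain d in a single
 * call, with no need to split the range into power-of-two chunks.
 */
static int assign_contiguous_region(struct domain *d, mfn_t smfn,
                                    unsigned long nr)
{
    return assign_pages(d, mfn_to_page(smfn), nr, 0);
}

/*
 * Order-based callers keep the old shape via assign_page():
 * assign_page(d, pg, order, memflags) is equivalent to
 * assign_pages(d, pg, 1UL << order, memflags).
 */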
---
 xen/arch/x86/pv/dom0_build.c |  2 +-
 xen/common/grant_table.c     |  2 +-
 xen/common/memory.c          |  4 ++--
 xen/common/page_alloc.c      | 21 +++++++++++++--------
 xen/include/xen/mm.h         |  6 ++++++
 5 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index d5a1a6a4e2..ebbafe48f3 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -557,7 +557,7 @@ int __init dom0_construct_pv(struct domain *d,
         else
         {
             while ( count-- )
-                if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )
+                if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 1, 0) )
                     BUG();
         }
         initrd->mod_end = 0;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index fab77ab9cc..1e138201a5 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2342,7 +2342,7 @@ gnttab_transfer(
          * is respected and speculative execution is blocked accordingly
          */
         if ( unlikely(!evaluate_nospec(okay)) ||
-            unlikely(assign_pages(e, page, 0, MEMF_no_refcount)) )
+            unlikely(assign_pages(e, page, 1, MEMF_no_refcount)) )
         {
             bool drop_dom_ref;
 
diff --git a/xen/common/memory.c b/xen/common/memory.c
index e07bd9a5ea..e22dad2c37 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -728,7 +728,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
         /* Assign each output page to the domain. */
         for ( j = 0; (page = page_list_remove_head(&out_chunk_list)); ++j )
         {
-            if ( assign_pages(d, page, exch.out.extent_order,
+            if ( assign_page(d, page, exch.out.extent_order,
                               MEMF_no_refcount) )
             {
                 unsigned long dec_count;
@@ -797,7 +797,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
      * cleared PGC_allocated.
      */
     while ( (page = page_list_remove_head(&in_chunk_list)) )
-        if ( assign_pages(d, page, 0, MEMF_no_refcount) )
+        if ( assign_pages(d, page, 1, MEMF_no_refcount) )
         {
             BUG_ON(!d->is_dying);
             free_domheap_page(page);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 2b4591bc56..05c9834dc2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2260,7 +2260,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
 int assign_pages(
     struct domain *d,
     struct page_info *pg,
-    unsigned int order,
+    unsigned long nr,
     unsigned int memflags)
 {
     int rc = 0;
@@ -2280,7 +2280,7 @@ int assign_pages(
     {
         unsigned int extra_pages = 0;
 
-        for ( i = 0; i < (1ul << order); i++ )
+        for ( i = 0; i < nr; i++ )
         {
             ASSERT(!(pg[i].count_info & ~PGC_extra));
             if ( pg[i].count_info & PGC_extra )
@@ -2289,18 +2289,18 @@ int assign_pages(
 
         ASSERT(!extra_pages ||
                ((memflags & MEMF_no_refcount) &&
-                extra_pages == 1u << order));
+                extra_pages == nr));
     }
 #endif
 
     if ( pg[0].count_info & PGC_extra )
     {
-        d->extra_pages += 1u << order;
+        d->extra_pages += nr;
         memflags &= ~MEMF_no_refcount;
     }
     else if ( !(memflags & MEMF_no_refcount) )
     {
-        unsigned int tot_pages = domain_tot_pages(d) + (1 << order);
+        unsigned int tot_pages = domain_tot_pages(d) + nr;
 
         if ( unlikely(tot_pages > d->max_pages) )
         {
@@ -2312,10 +2312,10 @@ int assign_pages(
     }
 
     if ( !(memflags & MEMF_no_refcount) &&
-         unlikely(domain_adjust_tot_pages(d, 1 << order) == (1 << order)) )
+         unlikely(domain_adjust_tot_pages(d, nr) == nr) )
         get_knownalive_domain(d);
 
-    for ( i = 0; i < (1 << order); i++ )
+    for ( i = 0; i < nr; i++ )
     {
         ASSERT(page_get_owner(&pg[i]) == NULL);
         page_set_owner(&pg[i], d);
@@ -2330,6 +2330,11 @@ int assign_pages(
     return rc;
 }
 
+int assign_page(struct domain *d, struct page_info *pg, unsigned int order,
+                unsigned int memflags)
+{
+    return assign_pages(d, pg, 1UL << order, memflags);
+}
 
 struct page_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int memflags)
@@ -2372,7 +2377,7 @@ struct page_info *alloc_domheap_pages(
                 pg[i].count_info = PGC_extra;
             }
         }
-        if ( assign_pages(d, pg, order, memflags) )
+        if ( assign_page(d, pg, order, memflags) )
         {
             free_heap_pages(pg, order, memflags & MEMF_no_scrub);
             return NULL;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 8e8fb5a615..f243ff88d7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -132,6 +132,12 @@ int query_page_offline(mfn_t mfn, uint32_t *status);
 void heap_init_late(void);
 
 int assign_pages(
+    struct domain *d,
+    struct page_info *pg,
+    unsigned long nr,
+    unsigned int memflags);
+
+int assign_page(
     struct domain *d,
     struct page_info *pg,
     unsigned int order,
-- 
2.25.1