
[Xen-devel] [PATCH 3/3] x86/dom0: simplify dom0_compute_nr_pages



Remove the loop in dom0_compute_nr_pages.

This is a non-functional change for PV domains, which always have
need_paging set to false and thus only performed a single loop
iteration.

For a PVH Dom0 the loop is no longer needed, since the amount of
memory required for paging no longer depends on the amount of memory
assigned to Dom0, but rather on the value of max_pdx. Hence the paging
allocation can be subtracted from the available memory before doing
the accounting of memory assigned to Dom0.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/dom0_build.c | 56 +++++++++++++++++++++--------------------------
 1 file changed, 25 insertions(+), 31 deletions(-)
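
For reference, below is a minimal, standalone sketch of the resulting
single-pass accounting. It is purely illustrative: the min_ul/max_ul
helpers, the hvm_needs_paging flag and all example values are made up
for this note and are not the actual Xen variables or defaults.

/*
 * Illustrative model of the single-pass accounting after this patch.
 * All values below are made-up examples, not Xen defaults.
 */
#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
    unsigned long avail = 1UL << 20;     /* 1M free 4k pages == 4GB */
    unsigned long paging_pgs = 2048;     /* stand-in for dom0_paging_pages(d, max_pdx) */
    long dom0_nrpages = 0;               /* no explicit Dom0 memory size given */
    long dom0_min_nrpages = 0;
    long dom0_max_nrpages = LONG_MAX;
    int hvm_needs_paging = 1;            /* PVH Dom0, page tables not shared with IOMMU */

    /* Paging memory now comes off the pool up front, exactly once. */
    if ( hvm_needs_paging )
        avail -= paging_pgs;

    unsigned long nr_pages = dom0_nrpages;
    unsigned long min_pages = dom0_min_nrpages;
    unsigned long max_pages = dom0_max_nrpages;

    /* Unspecified allocation: all memory minus min(avail/16, 128MB). */
    if ( nr_pages == 0 )
        nr_pages = -min_ul(avail / 16, 128UL << (20 - PAGE_SHIFT));

    /* Negative specification means "all memory - specified amount". */
    if ( (long)nr_pages  < 0 ) nr_pages  += avail;
    if ( (long)min_pages < 0 ) min_pages += avail;
    if ( (long)max_pages < 0 ) max_pages += avail;

    /* Clamp according to min/max limits and available memory. */
    nr_pages = max_ul(nr_pages, min_pages);
    nr_pages = min_ul(nr_pages, max_pages);
    nr_pages = min_ul(nr_pages, avail);

    printf("avail = %lu pages, nr_pages = %lu pages\n", avail, nr_pages);
    return 0;
}

With the example values above this prints nr_pages = 1013760: the
paging reservation is taken once up front (avail = 1046528), and the
default 128MB (32768 page) scratch reservation is then subtracted in
the single remaining accounting pass.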

diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index e2be70c33f..7404091943 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -242,7 +242,6 @@ unsigned long __init dom0_compute_nr_pages(
     nodeid_t node;
     unsigned long avail = 0, nr_pages, min_pages, max_pages;
     unsigned long paging_pgs = dom0_paging_pages(d, max_pdx);
-    bool need_paging;
 
     for_each_node_mask ( node, dom0_nodes )
         avail += avail_domheap_pages_region(node, 0, 0) +
@@ -258,39 +257,34 @@ unsigned long __init dom0_compute_nr_pages(
     /* Reserve memory for iommu_dom0_init(). */
     if ( iommu_enabled )
         avail -= paging_pgs;
-
-    need_paging = is_hvm_domain(d) &&
-        (!iommu_hap_pt_share || !paging_mode_hap(d));
-    for ( ; ; need_paging = false )
-    {
-        nr_pages = dom0_nrpages;
-        min_pages = dom0_min_nrpages;
-        max_pages = dom0_max_nrpages;
-
+    if ( is_hvm_domain(d) && (!iommu_hap_pt_share || !paging_mode_hap(d)) )
         /*
-         * If allocation isn't specified, reserve 1/16th of available memory
-         * for things like DMA buffers. This reservation is clamped to a
-         * maximum of 128MB.
+         * Reserve memory for shadow or HAP if not sharing the page
+         * tables with the IOMMU.
          */
-        if ( nr_pages == 0 )
-            nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));
-
-        /* Negative specification means "all memory - specified amount". */
-        if ( (long)nr_pages  < 0 ) nr_pages  += avail;
-        if ( (long)min_pages < 0 ) min_pages += avail;
-        if ( (long)max_pages < 0 ) max_pages += avail;
-
-        /* Clamp according to min/max limits and available memory. */
-        nr_pages = max(nr_pages, min_pages);
-        nr_pages = min(nr_pages, max_pages);
-        nr_pages = min(nr_pages, avail);
-
-        if ( !need_paging )
-            break;
-
-        /* Reserve memory for shadow or HAP. */
         avail -= paging_pgs;
-    }
+
+    nr_pages = dom0_nrpages;
+    min_pages = dom0_min_nrpages;
+    max_pages = dom0_max_nrpages;
+
+    /*
+     * If allocation isn't specified, reserve 1/16th of available memory
+     * for things like DMA buffers. This reservation is clamped to a
+     * maximum of 128MB.
+     */
+    if ( nr_pages == 0 )
+        nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));
+
+    /* Negative specification means "all memory - specified amount". */
+    if ( (long)nr_pages  < 0 ) nr_pages  += avail;
+    if ( (long)min_pages < 0 ) min_pages += avail;
+    if ( (long)max_pages < 0 ) max_pages += avail;
+
+    /* Clamp according to min/max limits and available memory. */
+    nr_pages = max(nr_pages, min_pages);
+    nr_pages = min(nr_pages, max_pages);
+    nr_pages = min(nr_pages, avail);
 
     if ( is_pv_domain(d) &&
          (parms->p2m_base == UNSET_ADDR) && (dom0_nrpages <= 0) &&
-- 
2.13.5 (Apple Git-94)

