
[Xen-devel] [PATCH RFC v1 59/74] xen/pvshim: add shim_mem cmdline parameter



From: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
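
Allow the amount of memory that xen-shim reserves for itself to be tuned
via a new shim_mem= command line option, instead of reusing dom0_mem.
By default, 1/16th of the container's memory is reserved, clamped to the
range 10MB..128MB. For example (illustrative values):

    shim_mem=min:16M,max:256M    scale with container size within bounds
    shim_mem=64M                 reserve exactly 64MB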

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 docs/misc/xen-command-line.markdown | 20 ++++++++++++++++
 xen/arch/x86/dom0_build.c           | 18 ++++++++++++++-
 xen/arch/x86/pv/shim.c              | 52 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/pv/shim.h       |  7 ++++++
 4 files changed, 96 insertions(+), 1 deletion(-)

diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 3a1a9c1fba..9f51710a46 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -686,6 +686,8 @@ any dom0 autoballooning feature present in your toolstack. See the
 _xl.conf(5)_ man page or [Xen Best
 Practices](http://wiki.xen.org/wiki/Xen_Best_Practices#Xen_dom0_dedicated_memory_and_preventing_dom0_memory_ballooning).
 
+This option has no effect when pv-shim mode is enabled.
+
 ### dom0\_nodes
 
 > `= List of [ <integer> | relaxed | strict ]`
@@ -1456,6 +1458,24 @@ guest compatibly inside an HVM container.
 In this mode, the kernel and initrd passed as modules to the hypervisor are
 constructed into a plain unprivileged PV domain.
 
+### shim\_mem (x86)
+> `= List of ( min:<size> | max:<size> | <size> )`
+
+Set the amount of memory that xen-shim reserves for itself. This option only
+has an effect when pv-shim mode is enabled.
+
+* `min:<size>` specifies the minimum amount of memory. Ignored if greater
+   than max. Default: 10M.
+* `max:<size>` specifies the maximum amount of memory. Default: 128M.
+* `<size>` specifies the exact amount of memory. Overrides both min and max.
+
+By default, 1/16th of the HVM container's total memory is reserved for
+xen-shim, with a minimum of 10MB and a maximum of 128MB.
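+
+For example, `shim_mem=min:16M,max:256M` (illustrative values) allows the
+reservation to scale with the container's size between those bounds, while
+`shim_mem=64M` reserves exactly 64MB regardless of min and max.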
+
 ### rcu-idle-timer-period-ms
 > `= <integer>`
 
diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 1c5853690a..1b0b89fdeb 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -51,6 +51,13 @@ static long __init parse_amt(const char *s, const char **ps)
 
 static int __init parse_dom0_mem(const char *s)
 {
+    /* xen-shim uses shim_mem parameter instead of dom0_mem */
+    if ( pv_shim )
+    {
+        printk("Ignoring dom0_mem param in pv-shim mode\n");
+        return 0;
+    }
+
     do {
         if ( !strncmp(s, "min:", 4) )
             dom0_min_nrpages = parse_amt(s+4, &s);
@@ -284,7 +291,16 @@ unsigned long __init dom0_compute_nr_pages(
          * maximum of 128MB.
          */
         if ( nr_pages == 0 )
-            nr_pages = -min(avail / 16, 128UL << (20 - PAGE_SHIFT));
+        {
+            uint64_t rsvd = min(avail / 16, 128UL << (20 - PAGE_SHIFT));
+
+            if ( pv_shim )
+            {
+                rsvd = pv_shim_mem(avail);
+                printk("Reserved %lu pages for xen-shim\n", rsvd);
+            }
+            nr_pages = -rsvd;
+        }
 
         /* Negative specification means "all memory - specified amount". */
         if ( (long)nr_pages  < 0 ) nr_pages  += avail;
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 56ecaea2d2..c24adacbc7 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -40,6 +40,58 @@ bool pv_shim;
 boolean_param("pv-shim", pv_shim);
 #endif
 
+/*
+ * By default, 1/16th of the HVM container's total memory is reserved for
+ * xen-shim, with a minimum of 10MB and a maximum of 128MB. Some users may
+ * wish to tune these constants for better memory utilization. This can be
+ * achieved with the following xen-shim command line option:
+ *
+ * shim_mem=[min:<min_amt>,][max:<max_amt>,][<amt>]
+ *
+ * <min_amt>: The minimum amount of memory that should be allocated for xen-shim
+ *            (ignored if greater than max)
+ * <max_amt>: The maximum amount of memory that should be allocated for xen-shim
+ * <amt>:     The precise amount of memory to allocate for xen-shim
+ *            (overrides both min and max)
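+ *
+ * E.g. with the defaults, a 1GB container reserves 1GB/16 = 64MB for
+ * xen-shim, while a 4GB container is clamped to the 128MB maximum.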
+ */
+static uint64_t __initdata shim_nrpages;
+static uint64_t __initdata shim_min_nrpages = 10UL << (20 - PAGE_SHIFT);
+static uint64_t __initdata shim_max_nrpages = 128UL << (20 - PAGE_SHIFT);
+
+static int __init parse_shim_mem(const char *s)
+{
+    do {
+        if ( !strncmp(s, "min:", 4) )
+            shim_min_nrpages = parse_size_and_unit(s+4, &s) >> PAGE_SHIFT;
+        else if ( !strncmp(s, "max:", 4) )
+            shim_max_nrpages = parse_size_and_unit(s+4, &s) >> PAGE_SHIFT;
+        else
+            shim_nrpages = parse_size_and_unit(s, &s) >> PAGE_SHIFT;
+    } while ( *s++ == ',' );
+
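+    /* Accept the input only if parsing stopped at the terminating NUL. */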
+    return s[-1] ? -EINVAL : 0;
+}
+custom_param("shim_mem", parse_shim_mem);
+
+uint64_t pv_shim_mem(uint64_t avail)
+{
+    uint64_t rsvd = min(avail / 16, shim_max_nrpages);
+
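+    /* An exact amount, if specified, overrides both min and max. */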
+    if ( shim_nrpages )
+        return shim_nrpages;
+
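+    /* Apply the lower bound only when min does not exceed max. */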
+    if ( shim_min_nrpages <= shim_max_nrpages )
+        rsvd = max(rsvd, shim_min_nrpages);
+
+    return rsvd;
+}
+
 static unsigned int nr_grant_list;
 static unsigned long *grant_frames;
 static DEFINE_SPINLOCK(grant_lock);
diff --git a/xen/include/asm-x86/pv/shim.h b/xen/include/asm-x86/pv/shim.h
index 0207348a85..00906f884b 100644
--- a/xen/include/asm-x86/pv/shim.h
+++ b/xen/include/asm-x86/pv/shim.h
@@ -41,6 +41,7 @@ void pv_shim_inject_evtchn(unsigned int port);
 long pv_shim_grant_table_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop,
                             unsigned int count, bool compat);
 domid_t get_dom0_domid(void);
+uint64_t pv_shim_mem(uint64_t avail);
 
 #else
 
@@ -80,6 +81,12 @@ static inline domid_t get_dom0_domid(void)
     return 0;
 }
 
+static inline uint64_t pv_shim_mem(uint64_t avail)
+{
+    ASSERT_UNREACHABLE();
+    return 0;
+}
+
 #endif
 
 #endif /* __X86_PV_SHIM_H__ */
-- 
2.11.0

