
[Xen-devel] [PATCH v2 09/18] mini-os: modify virtual memory layout for support of ballooning



In order to be able to support ballooning, the virtual memory layout
of Mini-OS has to be modified: instead of a (nearly) consecutive
area used for the physical memory mapping, on demand mappings, and the
heap, we need enough spare room for adding new memory.

So instead of dynamically placing the different regions based on the
detected memory size, locate them statically at fixed virtual addresses:

area                           x86-64               x86-32
------------------------------------------------------------
mapped physical memory       00000000             00000000
kernel virtual mappings    8000000000             3f000000
demand mappings          100000000000             40000000
heap                     200000000000             b0000000

This will enable Mini-OS to support up to 512GB of domain memory with
a 64 bit kernel and nearly 1GB with a 32 bit kernel.
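
For illustration, both limits follow directly from the MAX_MEM_SIZE
values introduced in arch_mm.h below; a minimal compile-time sketch of
that arithmetic (plain C11, not part of the patch) would be:

    /* Sketch only: where the 512GB and "nearly 1GB" limits come from,
     * using the constants added to arch_mm.h by this patch. */

    /* x86-64: the 1:1 mapping of physical memory runs from 0 up to
     * VIRT_KERNEL_AREA = 0x8000000000, i.e. 512GB. */
    _Static_assert((512ULL << 30) == 0x0000008000000000ULL,
                   "64 bit: up to 512GB of domain memory");

    /* x86-32: MAX_MEM_SIZE = 0x3f000000 is 1GB minus 16MB. */
    _Static_assert(0x3f000000UL == (1UL << 30) - (16UL << 20),
                   "32 bit: nearly 1GB of domain memory");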

For a 32 bit Mini-OS we have to avoid a conflict between the heap and
the m2p table, which the hypervisor maps at f5600000. So the size of the
demand mapping area is reduced by 256MB in order to keep the heap at
about 1GB.
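
The 32 bit arithmetic behind this, assuming 4kB pages (a sketch based
on the DEMAND_MAP_PAGES and VIRT_HEAP_AREA values added below, not part
of the patch):

    /* Sketch only: the demand map area now covers 0x6ffff pages, i.e.
     * the former 2GB minus 256MB and minus one page, so it ends exactly
     * one unallocated page below the heap at VIRT_HEAP_AREA. */
    _Static_assert(0x40000000UL + 0x6ffffUL * 0x1000UL
                   == 0xb0000000UL - 0x1000UL,
                   "one guard page between demand area and heap");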

The kernel virtual mappings area is new; it is needed to be able to
grow the p2m list without having to relocate it in physical memory.
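
As a rough back-of-the-envelope check (my numbers, assuming 4kB pages
and 8-byte p2m entries, not taken from the patch): a p2m list covering
the full 512GB of a 64 bit domain needs

    512GB / 4kB per page * 8 bytes per entry = 2^27 * 8 bytes = 1GB

of virtual address space, which fits easily in the roughly 15.5TB
between VIRT_KERNEL_AREA (0x8000000000) and VIRT_DEMAND_AREA
(0x100000000000).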

Modify the placement of the demand mappings and heap and adjust the
memory layout description.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2: avoid conflict with hypervisor mapped area on 32 bits
---
 arch/arm/mm.c         |  2 +-
 arch/x86/mm.c         | 44 +++++++++++---------------------------------
 include/mm.h          |  2 +-
 include/x86/arch_mm.h | 35 ++++++++++++++++++++++++++++++-----
 mm.c                  |  2 +-
 5 files changed, 44 insertions(+), 41 deletions(-)

diff --git a/arch/arm/mm.c b/arch/arm/mm.c
index efecc51..f75888d 100644
--- a/arch/arm/mm.c
+++ b/arch/arm/mm.c
@@ -75,7 +75,7 @@ void arch_init_p2m(unsigned long max_pfn)
 {
 }
 
-void arch_init_demand_mapping_area(unsigned long cur_pfn)
+void arch_init_demand_mapping_area(void)
 {
 }
 
diff --git a/arch/x86/mm.c b/arch/x86/mm.c
index c59a5d3..6aa4468 100644
--- a/arch/x86/mm.c
+++ b/arch/x86/mm.c
@@ -442,37 +442,21 @@ pgentry_t *need_pgt(unsigned long va)
  * Reserve an area of virtual address space for mappings and Heap
  */
 static unsigned long demand_map_area_start;
-#ifdef __x86_64__
-#define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
-#else
-#define DEMAND_MAP_PAGES ((2ULL << 30) / PAGE_SIZE)
-#endif
-
-#ifndef HAVE_LIBC
-#define HEAP_PAGES 0
-#else
+static unsigned long demand_map_area_end;
+#ifdef HAVE_LIBC
 unsigned long heap, brk, heap_mapped, heap_end;
-#ifdef __x86_64__
-#define HEAP_PAGES ((128ULL << 30) / PAGE_SIZE)
-#else
-#define HEAP_PAGES ((1ULL << 30) / PAGE_SIZE)
-#endif
 #endif
 
-void arch_init_demand_mapping_area(unsigned long cur_pfn)
+void arch_init_demand_mapping_area(void)
 {
-    cur_pfn++;
-
-    demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
-    cur_pfn += DEMAND_MAP_PAGES;
-    printk("Demand map pfns at %lx-%p.\n", 
-           demand_map_area_start, pfn_to_virt(cur_pfn));
+    demand_map_area_start = VIRT_DEMAND_AREA;
+    demand_map_area_end = demand_map_area_start + DEMAND_MAP_PAGES * PAGE_SIZE;
+    printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start,
+           demand_map_area_end);
 
 #ifdef HAVE_LIBC
-    cur_pfn++;
-    heap_mapped = brk = heap = (unsigned long) pfn_to_virt(cur_pfn);
-    cur_pfn += HEAP_PAGES;
-    heap_end = (unsigned long) pfn_to_virt(cur_pfn);
+    heap_mapped = brk = heap = VIRT_HEAP_AREA;
+    heap_end = heap_mapped + HEAP_PAGES * PAGE_SIZE;
     printk("Heap resides at %lx-%lx.\n", brk, heap_end);
 #endif
 }
@@ -729,14 +713,8 @@ void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p)
     start_pfn = PFN_UP(to_phys(start_info.pt_base)) + start_info.nr_pt_frames;
     max_pfn = start_info.nr_pages;
 
-    /* We need room for demand mapping and heap, clip available memory */
-#if defined(__i386__)
-    {
-        unsigned long virt_pfns = 1 + DEMAND_MAP_PAGES + 1 + HEAP_PAGES;
-        if (max_pfn + virt_pfns >= 0x100000)
-            max_pfn = 0x100000 - virt_pfns - 1;
-    }
-#endif
+    if ( max_pfn >= MAX_MEM_SIZE / PAGE_SIZE )
+        max_pfn = MAX_MEM_SIZE / PAGE_SIZE - 1;
 
     printk("  start_pfn: %lx\n", start_pfn);
     printk("    max_pfn: %lx\n", max_pfn);
diff --git a/include/mm.h b/include/mm.h
index b97b43e..a22dcd1 100644
--- a/include/mm.h
+++ b/include/mm.h
@@ -59,7 +59,7 @@ static __inline__ int get_order(unsigned long size)
     return order;
 }
 
-void arch_init_demand_mapping_area(unsigned long max_pfn);
+void arch_init_demand_mapping_area(void);
 void arch_init_mm(unsigned long* start_pfn_p, unsigned long* max_pfn_p);
 void arch_init_p2m(unsigned long max_pfn_p);
 
diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
index f756dab..d87fe55 100644
--- a/include/x86/arch_mm.h
+++ b/include/x86/arch_mm.h
@@ -49,11 +49,13 @@
  *
  * Virtual address space usage:
  *
- * 1:1 mapping of physical memory starting at VA(0)
- * 1 unallocated page
- * demand map area (32 bits: 2 GB, 64 bits: 128 GB) for virtual allocations
- * 1 unallocated page
- * with libc: heap area (32 bits: 1 GB, 64 bits: 128 GB)
+ *  area                           x86-64               x86-32
+ *  ------------------------------------------------------------
+ *  mapped physical memory       00000000             00000000
+ *  kernel virtual mappings    8000000000             3f000000
+ *  demand mappings          100000000000             40000000
+ *  heap (with libc only)    200000000000             b0000000
+ *
  */
 
 #define L1_FRAME                1
@@ -81,6 +83,15 @@
 typedef uint64_t pgentry_t;
 #endif
 
+#define MAX_MEM_SIZE            0x3f000000UL
+#define VIRT_KERNEL_AREA        0x3f000000UL
+#define VIRT_DEMAND_AREA        0x40000000UL
+#define VIRT_HEAP_AREA          0xb0000000UL
+
+#define DEMAND_MAP_PAGES        0x6ffffUL
+#define HEAP_PAGES_MAX          ((HYPERVISOR_VIRT_START - VIRT_HEAP_AREA) / \
+                                 PAGE_SIZE - 1)
+
 #elif defined(__x86_64__)
 
 #define L2_PAGETABLE_SHIFT      21
@@ -106,6 +117,20 @@ typedef uint64_t pgentry_t;
 typedef unsigned long pgentry_t;
 #endif
 
+#define MAX_MEM_SIZE            (512ULL << 30)
+#define VIRT_KERNEL_AREA        0x0000008000000000UL
+#define VIRT_DEMAND_AREA        0x0000100000000000UL
+#define VIRT_HEAP_AREA          0x0000200000000000UL
+
+#define DEMAND_MAP_PAGES        0x8000000UL
+#define HEAP_PAGES_MAX          0x8000000UL
+
+#endif
+
+#ifndef HAVE_LIBC
+#define HEAP_PAGES 0
+#else
+#define HEAP_PAGES   HEAP_PAGES_MAX
 #endif
 
 #define L1_MASK  ((1UL << L2_PAGETABLE_SHIFT) - 1)
diff --git a/mm.c b/mm.c
index 25ee3da..ff071ca 100644
--- a/mm.c
+++ b/mm.c
@@ -374,7 +374,7 @@ void init_mm(void)
 
     arch_init_p2m(max_pfn);
     
-    arch_init_demand_mapping_area(max_pfn);
+    arch_init_demand_mapping_area();
 }
 
 void fini_mm(void)
-- 
2.6.6

