
[Minios-devel] [PATCH v3 17/19] mini-os: add support for ballooning up



Add support for ballooning the domain up by a specified number of
pages. The following steps are performed:

- extending the p2m map
- extending the page allocator's bitmap
- getting new memory pages from the hypervisor
- adding the memory at the current end of guest memory
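
As a usage sketch (hypothetical caller, not part of this patch; the
memory-target handling is an assumption), balloon_up() is meant to be
called in a loop, since each call adds at most N_BALLOON_FRAMES pages
and returns the number of pages actually added:

    /* Hypothetical caller reacting to a raised memory target, e.g.
     * from a xenstore watch (not part of this patch). */
    static void mem_target_raised(unsigned long target_pages)
    {
        int rc;

        while ( nr_mem_pages < target_pages )
        {
            rc = balloon_up(target_pages - nr_mem_pages);
            if ( rc <= 0 )    /* error or no progress */
                break;
        }
    }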

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
Reviewed-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxx>
---
V3: change "if" to "while" in balloon_up() as requested by Samuel Thibault
---
 arch/arm/balloon.c |  9 ++++++
 arch/x86/balloon.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 balloon.c          | 64 +++++++++++++++++++++++++++++++++++++
 include/balloon.h  |  5 +++
 mm.c               |  4 +++
 5 files changed, 176 insertions(+)

diff --git a/arch/arm/balloon.c b/arch/arm/balloon.c
index 549e51b..7f35328 100644
--- a/arch/arm/balloon.c
+++ b/arch/arm/balloon.c
@@ -27,4 +27,13 @@
 
 unsigned long virt_kernel_area_end;   /* TODO: find a virtual area */
 
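+/* Ballooning isn't supported on ARM yet, so these functions are stubs. */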
+int arch_expand_p2m(unsigned long max_pfn)
+{
+    return 0;
+}
+
+void arch_pfn_add(unsigned long pfn, unsigned long mfn)
+{
+}
+
 #endif
diff --git a/arch/x86/balloon.c b/arch/x86/balloon.c
index a7f20e4..42389e4 100644
--- a/arch/x86/balloon.c
+++ b/arch/x86/balloon.c
@@ -23,6 +23,7 @@
 
 #include <mini-os/os.h>
 #include <mini-os/balloon.h>
+#include <mini-os/errno.h>
 #include <mini-os/lib.h>
 #include <mini-os/mm.h>
 
@@ -30,9 +31,36 @@
 
 unsigned long virt_kernel_area_end = VIRT_KERNEL_AREA;
 
+static void p2m_invalidate(unsigned long *list, unsigned long start_idx)
+{
+    unsigned long idx;
+
+    for ( idx = start_idx; idx < P2M_ENTRIES; idx++ )
+        list[idx] = INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long *p2m_l3list(void)
+{
+    return mfn_to_virt(HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list);
+}
+
+static inline unsigned long *p2m_to_virt(unsigned long p2m)
+{
+    return ( p2m == INVALID_P2M_ENTRY ) ? NULL : mfn_to_virt(p2m);
+}
+
 void arch_remap_p2m(unsigned long max_pfn)
 {
     unsigned long pfn;
+    unsigned long *l3_list, *l2_list, *l1_list;
+
+    l3_list = p2m_l3list();
+    l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(max_pfn - 1)]);
+    l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(max_pfn - 1)]);
+
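+    /* Invalidate all p2m entries above the last valid pfn (max_pfn - 1). */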
+    p2m_invalidate(l3_list, L3_P2M_IDX(max_pfn - 1) + 1);
+    p2m_invalidate(l2_list, L2_P2M_IDX(max_pfn - 1) + 1);
+    p2m_invalidate(l1_list, L1_P2M_IDX(max_pfn - 1) + 1);
 
     if ( p2m_pages(nr_max_pages) <= p2m_pages(max_pfn) )
         return;
@@ -50,4 +78,70 @@ void arch_remap_p2m(unsigned long max_pfn)
     ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
 }
 
+int arch_expand_p2m(unsigned long max_pfn)
+{
+    unsigned long pfn;
+    unsigned long *l1_list, *l2_list, *l3_list;
+
+    p2m_chk_pfn(max_pfn - 1);
+    l3_list = p2m_l3list();
+
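+    /* Extend the p2m list in L1-page steps, allocating missing l2 and l1
+     * pages; each new l1 page is mapped into the linear p2m list, too. */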
+    for ( pfn = (HYPERVISOR_shared_info->arch.max_pfn + P2M_MASK) & ~P2M_MASK;
+          pfn < max_pfn; pfn += P2M_ENTRIES )
+    {
+        l2_list = p2m_to_virt(l3_list[L3_P2M_IDX(pfn)]);
+        if ( !l2_list )
+        {
+            l2_list = (unsigned long*)alloc_page();
+            if ( !l2_list )
+                return -ENOMEM;
+            p2m_invalidate(l2_list, 0);
+            l3_list[L3_P2M_IDX(pfn)] = virt_to_mfn(l2_list);
+        }
+        l1_list = p2m_to_virt(l2_list[L2_P2M_IDX(pfn)]);
+        if ( !l1_list )
+        {
+            l1_list = (unsigned long*)alloc_page();
+            if ( !l1_list )
+                return -ENOMEM;
+            p2m_invalidate(l1_list, 0);
+            l2_list[L2_P2M_IDX(pfn)] = virt_to_mfn(l1_list);
+
+            if ( map_frame_rw((unsigned long)(phys_to_machine_mapping + pfn),
+                              l2_list[L2_P2M_IDX(pfn)]) )
+                return -ENOMEM;
+        }
+    }
+
+    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
+
+    /* Make sure the new last page can be mapped. */
+    if ( !need_pgt((unsigned long)pfn_to_virt(max_pfn - 1)) )
+        return -ENOMEM;
+
+    return 0;
+}
+
+void arch_pfn_add(unsigned long pfn, unsigned long mfn)
+{
+    mmu_update_t mmu_updates[1];
+    pgentry_t *pgt;
+    int rc;
+
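+    /* Enter the mfn into the p2m list and map the page read/write at its
+     * virtual address. */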
+    phys_to_machine_mapping[pfn] = mfn;
+
+    pgt = need_pgt((unsigned long)pfn_to_virt(pfn));
+    ASSERT(pgt);
+    mmu_updates[0].ptr = virt_to_mach(pgt) | MMU_NORMAL_PT_UPDATE;
+    mmu_updates[0].val = (pgentry_t)(mfn << PAGE_SHIFT) |
+                         _PAGE_PRESENT | _PAGE_RW;
+    rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF);
+    if ( rc < 0 )
+    {
+        printk("ERROR: build_pagetable(): PTE could not be updated\n");
+        printk("       mmu_update failed with rc=%d\n", rc);
+        do_exit();
+    }
+}
+
 #endif
diff --git a/balloon.c b/balloon.c
index 78b30af..07ef532 100644
--- a/balloon.c
+++ b/balloon.c
@@ -23,11 +23,13 @@
 
 #include <mini-os/os.h>
 #include <mini-os/balloon.h>
+#include <mini-os/errno.h>
 #include <mini-os/lib.h>
 #include <xen/xen.h>
 #include <xen/memory.h>
 
 unsigned long nr_max_pages;
+unsigned long nr_mem_pages;
 
 void get_max_pages(void)
 {
@@ -62,3 +64,65 @@ void mm_alloc_bitmap_remap(void)
     virt_kernel_area_end += round_pgup((nr_max_pages + 1) >> (PAGE_SHIFT + 3));
     ASSERT(virt_kernel_area_end <= VIRT_DEMAND_AREA);
 }
+
+#define N_BALLOON_FRAMES 64
+static unsigned long balloon_frames[N_BALLOON_FRAMES];
+
+int balloon_up(unsigned long n_pages)
+{
+    unsigned long page, pfn;
+    int rc;
+    struct xen_memory_reservation reservation = {
+        .address_bits = 0,
+        .extent_order = 0,
+        .domid        = DOMID_SELF
+    };
+
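+    /* Clamp the request to the allowed maximum and the frame array size. */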
+    if ( n_pages > nr_max_pages - nr_mem_pages )
+        n_pages = nr_max_pages - nr_mem_pages;
+    if ( n_pages > N_BALLOON_FRAMES )
+        n_pages = N_BALLOON_FRAMES;
+
+    /* Resize alloc_bitmap if necessary. */
+    while ( mm_bitmap_size * 8 < nr_mem_pages + n_pages )
+    {
+        page = alloc_page();
+        if ( !page )
+            return -ENOMEM;
+
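+        /* Mark the pages covered by the new bitmap page as allocated. */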
+        memset((void *)page, ~0, PAGE_SIZE);
+        if ( map_frame_rw((unsigned long)mm_bitmap + mm_bitmap_size,
+                          virt_to_mfn(page)) )
+        {
+            free_page((void *)page);
+            return -ENOMEM;
+        }
+
+        mm_bitmap_size += PAGE_SIZE;
+    }
+
+    rc = arch_expand_p2m(nr_mem_pages + n_pages);
+    if ( rc )
+        return rc;
+
+    /* Get new memory from hypervisor. */
+    for ( pfn = 0; pfn < n_pages; pfn++ )
+    {
+        balloon_frames[pfn] = nr_mem_pages + pfn;
+    }
+    set_xen_guest_handle(reservation.extent_start, balloon_frames);
+    reservation.nr_extents = n_pages;
+    rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+    if ( rc <= 0 )
+        return rc;
+
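+    /* Enter the new frames into the p2m and hand the pages to the page
+     * allocator by freeing them. */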
+    for ( pfn = 0; pfn < rc; pfn++ )
+    {
+        arch_pfn_add(nr_mem_pages + pfn, balloon_frames[pfn]);
+        free_page(pfn_to_virt(nr_mem_pages + pfn));
+    }
+
+    nr_mem_pages += rc;
+
+    return rc;
+}
diff --git a/include/balloon.h b/include/balloon.h
index 9154f44..5ec1bbb 100644
--- a/include/balloon.h
+++ b/include/balloon.h
@@ -28,10 +28,15 @@
 
 extern unsigned long nr_max_pages;
 extern unsigned long virt_kernel_area_end;
+extern unsigned long nr_mem_pages;
 
 void get_max_pages(void);
+int balloon_up(unsigned long n_pages);
+
 void arch_remap_p2m(unsigned long max_pfn);
 void mm_alloc_bitmap_remap(void);
+int arch_expand_p2m(unsigned long max_pfn);
+void arch_pfn_add(unsigned long pfn, unsigned long mfn);
 
 #else /* CONFIG_BALLOON */
 
diff --git a/mm.c b/mm.c
index e2f55af..5364079 100644
--- a/mm.c
+++ b/mm.c
@@ -382,6 +382,10 @@ void init_mm(void)
     arch_init_p2m(max_pfn);
     
     arch_init_demand_mapping_area();
+
+#ifdef CONFIG_BALLOON
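+    /* The domain's memory is populated up to max_pfn initially. */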
+    nr_mem_pages = max_pfn;
+#endif
 }
 
 void fini_mm(void)
-- 
2.6.6

