[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[XenPPC] [linux-ppc-2.6] [XEN][POWERPC] Use a bitmap to manage the foreign page area



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 39d3d1cfe7146a6542a7708bace0ab38f11e48b5
# Parent  2a9c6a23cd1292e9ed361e33d640ce84a6fbdb53
[XEN][POWERPC] Use a bitmap to manage the foreign page area

We needed to be able to dispense pages from the foreign map to Xen VIO
back drivers.  I thought mempool would be an easy way to do this, but
it seems that a bitmap is far easier and simpler to do this with.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 arch/powerpc/platforms/xen/balloon.c |   14 +---
 arch/powerpc/platforms/xen/gnttab.c  |  100 ++++++++++++++++-------------------
 arch/powerpc/platforms/xen/setup.h   |    4 -
 arch/powerpc/platforms/xen/util.c    |    2 
 4 files changed, 55 insertions(+), 65 deletions(-)

diff -r 2a9c6a23cd12 -r 39d3d1cfe714 arch/powerpc/platforms/xen/balloon.c
--- a/arch/powerpc/platforms/xen/balloon.c      Fri Nov 03 16:50:22 2006 -0500
+++ b/arch/powerpc/platforms/xen/balloon.c      Wed Nov 08 15:44:19 2006 -0500
@@ -11,22 +11,16 @@ struct page **alloc_empty_pages_and_page
 {
        struct page *page, **pagevec;
        int i;
-       int scrub = 0;
 
        pagevec = kmalloc(sizeof(*pagevec) * nr_pages, GFP_KERNEL);
        if (pagevec == NULL)
                return  NULL;
 
        for (i = 0; i < nr_pages; i++) {
-               page = alloc_grant_page();
+               page = alloc_foreign_page();
                pagevec[i] = page;
-
-               if (scrub) {
-                       void *vaddr;
-
-                       vaddr = page_address(page);
-                       scrub_pages(vaddr, 1);
-               }
+               /* There is no real page backing us yet so it cannot
+                * be scrubbed */
        }
 
        return pagevec;
@@ -40,7 +34,7 @@ void free_empty_pages_and_pagevec(struct
                return;
 
        for (i = 0; i < nr_pages; i++) {
-               free_grant_page(pagevec[i]);
+               free_foreign_page(pagevec[i]);
        }
        
        kfree(pagevec);
diff -r 2a9c6a23cd12 -r 39d3d1cfe714 arch/powerpc/platforms/xen/gnttab.c
--- a/arch/powerpc/platforms/xen/gnttab.c       Fri Nov 03 16:50:22 2006 -0500
+++ b/arch/powerpc/platforms/xen/gnttab.c       Wed Nov 08 15:44:19 2006 -0500
@@ -2,7 +2,6 @@
 #include <linux/config.h>
 #include <linux/vmalloc.h>
 #include <linux/memory_hotplug.h>
-#include <linux/mempool.h>
 #include <xen/gnttab.h>
 #include <asm/hypervisor.h>
 #include <xen/interface/grant_table.h>
@@ -25,8 +24,9 @@
 
 struct address_space xen_foreign_dummy_mapping;
 
-static ulong foreign_map_base;
-static ulong foreign_map_end;
+static ulong foreign_map_pfn;
+static ulong foreign_map_pgs;
+static unsigned long *foreign_map_bitmap;
 
 
 /* hijack _mapcount */
@@ -215,7 +215,9 @@ static void gnttab_post_map_grant_ref(
                        continue;
                }
 
-               BUG_ON(pa < foreign_map_base || pa >= foreign_map_end);
+               BUG_ON(pa < (foreign_map_pfn << PAGE_SHIFT));
+               BUG_ON(pa >= (foreign_map_pfn << PAGE_SHIFT) + 
+                      (foreign_map_pgs << PAGE_SHIFT));
 
                page = virt_to_page(__va(pa));
 
@@ -330,9 +332,7 @@ static ulong find_grant_maps(void)
                panic("foreign-map is 0x%lx, expect 0x%lx\n",
                      gm[0], expect);
 
-       foreign_map_base = 1UL << (gm[0] + PAGE_SHIFT);
-       foreign_map_end = foreign_map_base + (gm[1] << PAGE_SHIFT);
-
+       foreign_map_pfn = 1UL << gm[0];
        return gm[1];
 }
 
@@ -353,40 +353,27 @@ int arch_gnttab_suspend(volatile void __
        return 0;
 }
 
-static mempool_t *grant_pool;
-struct page *alloc_grant_page(void)
-{
-       void *va;
-
-       va = mempool_alloc(grant_pool, GFP_KERNEL);
-
-       return virt_to_page(va);
-}
-
-void free_grant_page(struct page *page)
-{
-       mempool_free(pfn_to_kaddr(page_to_pfn(page)), grant_pool);
-}
-
-static void *gp_alloc(gfp_t gfp_mask, void *pool_data)
-{
-       static ulong count;
-       ulong max = (ulong)pool_data;
-       void *ret = NULL;
-
-       if (count < max) {
-               ulong pfn;
-
-               pfn = (foreign_map_base >> PAGE_SHIFT) + count;
-               ret = pfn_to_kaddr(pfn);
-               ++count;
-       }
-       return ret;
-}
-
-static void gp_free(void *element, void *pool_data)
-{
-       BUG();
+struct page *alloc_foreign_page(void)
+{
+       int bit;
+       do {
+               bit = find_first_zero_bit(foreign_map_bitmap,
+                                         foreign_map_pgs);
+               if (bit >= foreign_map_pgs)
+                       return NULL;
+       } while (test_and_set_bit(bit, foreign_map_bitmap) == 1);
+
+       return pfn_to_page(foreign_map_pfn + bit);
+}
+
+void free_foreign_page(struct page *page)
+{
+       int bit = page_to_pfn(page) - foreign_map_pfn;
+
+       BUG_ON(bit < 0);
+       BUG_ON(bit >= foreign_map_pgs);
+
+       clear_bit(bit, foreign_map_bitmap);
 }
 
 static void setup_grant_area(void)
@@ -400,25 +387,39 @@ static void setup_grant_area(void)
        pgs = find_grant_maps();
        setup_foreign_segment();
 
+       printk("%s: Xen VIO will use a foreign address space of 0x%lx pages\n",
+              __func__, pgs);
+
        /* add pages to the zone */
        nid = 0;
        pgdata = NODE_DATA(nid);
        zone = pgdata->node_zones;
 
-       err = __add_pages(zone, foreign_map_base >> PAGE_SHIFT, pgs);
+       err = __add_pages(zone, foreign_map_pfn, pgs);
 
        if (err < 0) {
                printk(KERN_EMERG "%s: add_pages(0x%lx, 0x%lx) = %d\n",
-                      __func__, foreign_map_base >> PAGE_SHIFT, pgs, err);
+                      __func__, foreign_map_pfn, pgs, err);
                BUG();
        }
 
-       /* create a memory pool to manage these pages */
-       grant_pool = mempool_create(pgs, gp_alloc, gp_free, (void *)pgs);
-       if (grant_pool == NULL) {
-               printk(KERN_EMERG "%s: mempool_create(): failed\n", __func__);
+       /* create a bitmap to manage these pages */
+       foreign_map_bitmap = kmalloc(BITS_TO_LONGS(pgs) * sizeof(long),
+                                    GFP_KERNEL);
+       if (foreign_map_bitmap == NULL) {
+               printk(KERN_EMERG 
+                      "%s: could not allocate foreign_map_bitmap to "
+                      "manage 0x%lx foreign pages\n", __func__, pgs);
                BUG();
        }
+       /* I'm paranoid so make sure we assign the top bits so we
+        * don't give them away */
+       bitmap_fill(&foreign_map_bitmap[BITS_TO_LONGS(pgs) - 1],
+                   BITS_PER_LONG);
+       /* now clear all the real bits */
+       bitmap_zero(foreign_map_bitmap, pgs);
+
+       foreign_map_pgs = pgs;
 }
 
 void *arch_gnttab_map(unsigned long *frames)
@@ -439,8 +440,3 @@ void *arch_gnttab_map(unsigned long *fra
 
        return shared;
 }
-
-int arch_is_foreign_page(struct page *page)
-{
-       return ((page_to_pfn(page) << PAGE_SHIFT) >= foreign_map_base);
-}
diff -r 2a9c6a23cd12 -r 39d3d1cfe714 arch/powerpc/platforms/xen/setup.h
--- a/arch/powerpc/platforms/xen/setup.h        Fri Nov 03 16:50:22 2006 -0500
+++ b/arch/powerpc/platforms/xen/setup.h        Wed Nov 08 15:44:19 2006 -0500
@@ -22,6 +22,6 @@ static inline u64 jiffies_to_ns(unsigned
 
 #define xen_guest_handle(hnd)  ((hnd).p)
 
-extern struct page *alloc_grant_page(void);
-extern void free_grant_page(struct page *page);
+extern struct page *alloc_foreign_page(void);
+extern void free_foreign_page(struct page *page);
 
diff -r 2a9c6a23cd12 -r 39d3d1cfe714 arch/powerpc/platforms/xen/util.c
--- a/arch/powerpc/platforms/xen/util.c Fri Nov 03 16:50:22 2006 -0500
+++ b/arch/powerpc/platforms/xen/util.c Wed Nov 08 15:44:19 2006 -0500
@@ -12,7 +12,7 @@ struct vm_struct *alloc_vm_area(unsigned
        struct vm_struct *area;
        struct page *page;
 
-       page = alloc_grant_page();
+       page = alloc_foreign_page();
        if (page == NULL) {
                BUG();
                return NULL;

_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.