
[Xen-changelog] Apply Ross Mcilroy's balloon_dealloc_empty_page_range patch



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 2052ce3345c1441f96729bc59120b3352a995112
# Parent  531ad4bde8f249c318588f41bdbb72d0b0e0ea9c
Apply Ross Mcilroy's balloon_dealloc_empty_page_range patch
and generally clean up the balloon driver a bunch.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
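
For callers, the practical effect of this patch is that an empty low-memory
range is now handed out by the balloon driver as a struct page pointer (and
can later be returned to it) instead of coming from the removed
allocate_empty_lowmem_region() as a raw virtual address. The fragment below
is a minimal, hedged sketch of that caller-side change; example_backend_init()
and the MMAP_PAGES value are hypothetical stand-ins and the include path is
assumed from the sparse-tree layout, but the calls themselves mirror the
blkback, blktap, netback and usbback hunks further down.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm-xen/balloon.h>

#define MMAP_PAGES 1024         /* stand-in for the driver's mapping size */

static unsigned long mmap_vstart;

static int __init example_backend_init(void)
{
        struct page *page;

        /* Old interface (removed by this patch): returned a kernel
         * virtual address directly.
         *     mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES);
         */

        /* New interface: returns the first struct page of the empty
         * range; the virtual address is recovered via pfn_to_kaddr(). */
        page = balloon_alloc_empty_page_range(MMAP_PAGES);
        BUG_ON(page == NULL);
        mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

        /* When the range is no longer needed it is handed back to the
         * balloon rather than freed to the page allocator:
         *     balloon_dealloc_empty_page_range(page, MMAP_PAGES);
         */

        return 0;
}

Keeping the bookkeeping for these ranges inside the balloon driver is what
lets the change below drop the open-coded page-table and P2M manipulation
from hypervisor.c.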

diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Mon Aug 22 16:38:13 2005
@@ -405,54 +405,6 @@
        balloon_unlock(flags);
 }
 
-
-unsigned long allocate_empty_lowmem_region(unsigned long pages)
-{
-       pgd_t         *pgd;
-       pud_t         *pud; 
-       pmd_t         *pmd;
-       pte_t         *pte;
-       unsigned long *pfn_array;
-       unsigned long  vstart;
-       unsigned long  i;
-       unsigned int   order = get_order(pages*PAGE_SIZE);
-
-       vstart = __get_free_pages(GFP_KERNEL, order);
-       if (vstart == 0)
-               return 0UL;
-
-       scrub_pages(vstart, 1 << order);
-
-       pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-       BUG_ON(pfn_array == NULL);
-
-       for (i = 0; i < (1<<order); i++) {
-               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
-               pfn_array[i] = pte_mfn(*pte);
-#ifdef CONFIG_X86_64
-               xen_l1_entry_update(pte, __pte(0));
-#else
-               BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
-                                                   __pte_ma(0), 0));
-#endif
-               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-                       INVALID_P2M_ENTRY;
-       }
-
-       flush_tlb_all();
-
-       balloon_put_pages(pfn_array, 1 << order);
-
-       vfree(pfn_array);
-
-       return vstart;
-}
-
-EXPORT_SYMBOL(allocate_empty_lowmem_region);
-
 /*
  * Local variables:
  *  c-file-style: "linux"
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Mon Aug 22 16:38:13 2005
@@ -83,12 +83,15 @@
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 /* Use the private and mapping fields of struct page as a list. */
-#define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
-#define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l),   \
-                                      struct page, private ) )
-#define UNLIST_PAGE(p)  do { list_del(PAGE_TO_LIST(p));       \
-                             p->mapping = NULL;               \
-                             p->private = 0; } while(0)
+#define PAGE_TO_LIST(p) ((struct list_head *)&p->private)
+#define LIST_TO_PAGE(l)                                \
+       (list_entry(((unsigned long *)l), struct page, private))
+#define UNLIST_PAGE(p)                         \
+       do {                                    \
+               list_del(PAGE_TO_LIST(p));      \
+               p->mapping = NULL;              \
+               p->private = 0;                 \
+       } while(0)
 #else
 /* There's a dedicated list field in struct page we can use.    */
 #define PAGE_TO_LIST(p) ( &p->list )
@@ -104,56 +107,53 @@
 #endif
 
 #define IPRINTK(fmt, args...) \
-    printk(KERN_INFO "xen_mem: " fmt, ##args)
+       printk(KERN_INFO "xen_mem: " fmt, ##args)
 #define WPRINTK(fmt, args...) \
-    printk(KERN_WARNING "xen_mem: " fmt, ##args)
+       printk(KERN_WARNING "xen_mem: " fmt, ##args)
 
 /* balloon_append: add the given page to the balloon. */
 static void balloon_append(struct page *page)
 {
-    /* Low memory is re-populated first, so highmem pages go at list tail. */
-    if ( PageHighMem(page) )
-    {
-        list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-        balloon_high++;
-    }
-    else
-    {
-        list_add(PAGE_TO_LIST(page), &ballooned_pages);
-        balloon_low++;
-    }
+       /* Lowmem is re-populated first, so highmem pages go at list tail. */
+       if (PageHighMem(page)) {
+               list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
+               balloon_high++;
+       } else {
+               list_add(PAGE_TO_LIST(page), &ballooned_pages);
+               balloon_low++;
+       }
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
 static struct page *balloon_retrieve(void)
 {
-    struct page *page;
-
-    if ( list_empty(&ballooned_pages) )
-        return NULL;
-
-    page = LIST_TO_PAGE(ballooned_pages.next);
-    UNLIST_PAGE(page);
-
-    if ( PageHighMem(page) )
-        balloon_high--;
-    else
-        balloon_low--;
-
-    return page;
+       struct page *page;
+
+       if (list_empty(&ballooned_pages))
+               return NULL;
+
+       page = LIST_TO_PAGE(ballooned_pages.next);
+       UNLIST_PAGE(page);
+
+       if (PageHighMem(page))
+               balloon_high--;
+       else
+               balloon_low--;
+
+       return page;
 }
 
 static void balloon_alarm(unsigned long unused)
 {
-    schedule_work(&balloon_worker);
+       schedule_work(&balloon_worker);
 }
 
 static unsigned long current_target(void)
 {
-    unsigned long target = min(target_pages, hard_limit);
-    if ( target > (current_pages + balloon_low + balloon_high) )
-        target = current_pages + balloon_low + balloon_high;
-    return target;
+       unsigned long target = min(target_pages, hard_limit);
+       if (target > (current_pages + balloon_low + balloon_high))
+               target = current_pages + balloon_low + balloon_high;
+       return target;
 }
 
 /*
@@ -164,161 +164,147 @@
  */
 static void balloon_process(void *unused)
 {
-    unsigned long *mfn_list, pfn, i, flags;
-    struct page   *page;
-    long           credit, debt, rc;
-    void          *v;
-
-    down(&balloon_mutex);
+       unsigned long *mfn_list, pfn, i, flags;
+       struct page   *page;
+       long           credit, debt, rc;
+       void          *v;
+
+       down(&balloon_mutex);
 
  retry:
-    mfn_list = NULL;
-
-    if ( (credit = current_target() - current_pages) > 0 )
-    {
-        mfn_list = (unsigned long *)vmalloc(credit * sizeof(*mfn_list));
-        if ( mfn_list == NULL )
-            goto out;
-
-        balloon_lock(flags);
-        rc = HYPERVISOR_dom_mem_op(
-            MEMOP_increase_reservation, mfn_list, credit, 0);
-        balloon_unlock(flags);
-        if ( rc < credit )
-        {
-            /* We hit the Xen hard limit: reprobe. */
-            if ( HYPERVISOR_dom_mem_op(
-                MEMOP_decrease_reservation, mfn_list, rc, 0) != rc )
-                BUG();
-            hard_limit = current_pages + rc - driver_pages;
-            vfree(mfn_list);
-            goto retry;
-        }
-
-        for ( i = 0; i < credit; i++ )
-        {
-            if ( (page = balloon_retrieve()) == NULL )
-                BUG();
-
-            pfn = page - mem_map;
-            if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
-                BUG();
-
-            /* Update P->M and M->P tables. */
-            phys_to_machine_mapping[pfn] = mfn_list[i];
-            xen_machphys_update(mfn_list[i], pfn);
+       mfn_list = NULL;
+
+       if ((credit = current_target() - current_pages) > 0) {
+               mfn_list = vmalloc(credit * sizeof(*mfn_list));
+               if (mfn_list == NULL)
+                       goto out;
+
+               balloon_lock(flags);
+               rc = HYPERVISOR_dom_mem_op(
+                       MEMOP_increase_reservation, mfn_list, credit, 0);
+               balloon_unlock(flags);
+               if (rc < credit) {
+                       /* We hit the Xen hard limit: reprobe. */
+                       BUG_ON(HYPERVISOR_dom_mem_op(
+                               MEMOP_decrease_reservation,
+                               mfn_list, rc, 0) != rc);
+                       hard_limit = current_pages + rc - driver_pages;
+                       vfree(mfn_list);
+                       goto retry;
+               }
+
+               for (i = 0; i < credit; i++) {
+                       page = balloon_retrieve();
+                       BUG_ON(page == NULL);
+
+                       pfn = page - mem_map;
+                       if (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
+                               BUG();
+
+                       /* Update P->M and M->P tables. */
+                       phys_to_machine_mapping[pfn] = mfn_list[i];
+                       xen_machphys_update(mfn_list[i], pfn);
             
-            /* Link back into the page tables if it's not a highmem page. */
-            if ( pfn < max_low_pfn )
-            {
-                BUG_ON(HYPERVISOR_update_va_mapping(
-                    (unsigned long)__va(pfn << PAGE_SHIFT),
-                    pfn_pte_ma(mfn_list[i], PAGE_KERNEL), 0));
-            }
-
-            /* Finally, relinquish the memory back to the system allocator. */
-            ClearPageReserved(page);
-            set_page_count(page, 1);
-            __free_page(page);
-        }
-
-        current_pages += credit;
-    }
-    else if ( credit < 0 )
-    {
-        debt = -credit;
-
-        mfn_list = (unsigned long *)vmalloc(debt * sizeof(*mfn_list));
-        if ( mfn_list == NULL )
-            goto out;
-
-        for ( i = 0; i < debt; i++ )
-        {
-            if ( (page = alloc_page(GFP_HIGHUSER)) == NULL )
-            {
-                debt = i;
-                break;
-            }
-
-            pfn = page - mem_map;
-            mfn_list[i] = phys_to_machine_mapping[pfn];
-
-            if ( !PageHighMem(page) )
-            {
-                v = phys_to_virt(pfn << PAGE_SHIFT);
-                scrub_pages(v, 1);
-                BUG_ON(HYPERVISOR_update_va_mapping(
-                    (unsigned long)v, __pte_ma(0), 0));
-            }
+                       /* Link back into the page tables if not highmem. */
+                       if (pfn < max_low_pfn)
+                               BUG_ON(HYPERVISOR_update_va_mapping(
+                                       (unsigned long)__va(pfn << PAGE_SHIFT),
+                                       pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
+                                       0));
+
+                       /* Relinquish the page back to the allocator. */
+                       ClearPageReserved(page);
+                       set_page_count(page, 1);
+                       __free_page(page);
+               }
+
+               current_pages += credit;
+       } else if (credit < 0) {
+               debt = -credit;
+
+               mfn_list = vmalloc(debt * sizeof(*mfn_list));
+               if (mfn_list == NULL)
+                       goto out;
+
+               for (i = 0; i < debt; i++) {
+                       if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
+                               debt = i;
+                               break;
+                       }
+
+                       pfn = page - mem_map;
+                       mfn_list[i] = phys_to_machine_mapping[pfn];
+
+                       if (!PageHighMem(page)) {
+                               v = phys_to_virt(pfn << PAGE_SHIFT);
+                               scrub_pages(v, 1);
+                               BUG_ON(HYPERVISOR_update_va_mapping(
+                                       (unsigned long)v, __pte_ma(0), 0));
+                       }
 #ifdef CONFIG_XEN_SCRUB_PAGES
-            else
-            {
-                v = kmap(page);
-                scrub_pages(v, 1);
-                kunmap(page);
-            }
+                       else {
+                               v = kmap(page);
+                               scrub_pages(v, 1);
+                               kunmap(page);
+                       }
 #endif
-        }
-
-        /* Ensure that ballooned highmem pages don't have cached mappings. */
-        kmap_flush_unused();
-        flush_tlb_all();
-
-        /* No more mappings: invalidate pages in P2M and add to balloon. */
-        for ( i = 0; i < debt; i++ )
-        {
-            pfn = mfn_to_pfn(mfn_list[i]);
-            phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
-            balloon_append(pfn_to_page(pfn));
-        }
-
-        if ( HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
-            BUG();
-
-        current_pages -= debt;
-    }
+               }
+
+               /* Ensure that ballooned highmem pages don't have kmaps. */
+               kmap_flush_unused();
+               flush_tlb_all();
+
+               /* No more mappings: invalidate P2M and add to balloon. */
+               for (i = 0; i < debt; i++) {
+                       pfn = mfn_to_pfn(mfn_list[i]);
+                       phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
+                       balloon_append(pfn_to_page(pfn));
+               }
+
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, mfn_list, debt, 0) != debt);
+
+               current_pages -= debt;
+       }
 
  out:
-    if ( mfn_list != NULL )
-        vfree(mfn_list);
-
-    /* Schedule more work if there is some still to be done. */
-    if ( current_target() != current_pages )
-        mod_timer(&balloon_timer, jiffies + HZ);
-
-    up(&balloon_mutex);
+       if (mfn_list != NULL)
+               vfree(mfn_list);
+
+       /* Schedule more work if there is some still to be done. */
+       if (current_target() != current_pages)
+               mod_timer(&balloon_timer, jiffies + HZ);
+
+       up(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 static void set_new_target(unsigned long target)
 {
-    /* No need for lock. Not read-modify-write updates. */
-    hard_limit   = ~0UL;
-    target_pages = target;
-    schedule_work(&balloon_worker);
+       /* No need for lock. Not read-modify-write updates. */
+       hard_limit   = ~0UL;
+       target_pages = target;
+       schedule_work(&balloon_worker);
 }
 
 static struct xenbus_watch target_watch =
 {
-    .node = "memory/target"
+       .node = "memory/target"
 };
 
 /* React to a change in the target key */
 static void watch_target(struct xenbus_watch *watch, const char *node)
 {
-    unsigned long new_target;
-    int err;
-
-    err = xenbus_scanf("memory", "target", "%lu", &new_target);
+       unsigned long new_target;
+       int err;
+
+       err = xenbus_scanf("memory", "target", "%lu", &new_target);
+       if (err != 1) {
+               printk(KERN_ERR "Unable to read memory/target\n");
+               return;
+       } 
         
-    if(err != 1) 
-    {
-        printk(KERN_ERR "Unable to read memory/target\n");
-        return;
-    } 
-        
-    set_new_target(new_target >> PAGE_SHIFT);
+       set_new_target(new_target >> PAGE_SHIFT);
     
 }
 
@@ -329,141 +315,185 @@
                          unsigned long event,
                          void *data)
 {
-    int err;
-
-    BUG_ON(down_trylock(&xenbus_lock) == 0);
-
-    err = register_xenbus_watch(&target_watch);
-
-    if (err) {
-        printk(KERN_ERR "Failed to set balloon watcher\n");
-    }
-
-    return NOTIFY_DONE;
+       int err;
+
+       BUG_ON(down_trylock(&xenbus_lock) == 0);
+
+       err = register_xenbus_watch(&target_watch);
+       if (err)
+               printk(KERN_ERR "Failed to set balloon watcher\n");
+
+       return NOTIFY_DONE;
     
 }
 
 static int balloon_write(struct file *file, const char __user *buffer,
                          unsigned long count, void *data)
 {
-    char memstring[64], *endchar;
-    unsigned long long target_bytes;
-
-    if ( !capable(CAP_SYS_ADMIN) )
-        return -EPERM;
-
-    if ( count <= 1 )
-        return -EBADMSG; /* runt */
-    if ( count > sizeof(memstring) )
-        return -EFBIG;   /* too long */
-
-    if ( copy_from_user(memstring, buffer, count) )
-        return -EFAULT;
-    memstring[sizeof(memstring)-1] = '\0';
-
-    target_bytes = memparse(memstring, &endchar);
-    set_new_target(target_bytes >> PAGE_SHIFT);
-
-    return count;
+       char memstring[64], *endchar;
+       unsigned long long target_bytes;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (count <= 1)
+               return -EBADMSG; /* runt */
+       if (count > sizeof(memstring))
+               return -EFBIG;   /* too long */
+
+       if (copy_from_user(memstring, buffer, count))
+               return -EFAULT;
+       memstring[sizeof(memstring)-1] = '\0';
+
+       target_bytes = memparse(memstring, &endchar);
+       set_new_target(target_bytes >> PAGE_SHIFT);
+
+       return count;
 }
 
 static int balloon_read(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
 {
-    int len;
-
-    len = sprintf(
-        page,
-        "Current allocation: %8lu kB\n"
-        "Requested target:   %8lu kB\n"
-        "Low-mem balloon:    %8lu kB\n"
-        "High-mem balloon:   %8lu kB\n"
-        "Xen hard limit:     ",
-        PAGES2KB(current_pages), PAGES2KB(target_pages), 
-        PAGES2KB(balloon_low), PAGES2KB(balloon_high));
-
-    if ( hard_limit != ~0UL )
-        len += sprintf(
-            page + len, 
-            "%8lu kB (inc. %8lu kB driver headroom)\n",
-            PAGES2KB(hard_limit), PAGES2KB(driver_pages));
-    else
-        len += sprintf(
-            page + len,
-            "     ??? kB\n");
-
-    *eof = 1;
-    return len;
+       int len;
+
+       len = sprintf(
+               page,
+               "Current allocation: %8lu kB\n"
+               "Requested target:   %8lu kB\n"
+               "Low-mem balloon:    %8lu kB\n"
+               "High-mem balloon:   %8lu kB\n"
+               "Xen hard limit:     ",
+               PAGES2KB(current_pages), PAGES2KB(target_pages), 
+               PAGES2KB(balloon_low), PAGES2KB(balloon_high));
+
+       if (hard_limit != ~0UL) {
+               len += sprintf(
+                       page + len, 
+                       "%8lu kB (inc. %8lu kB driver headroom)\n",
+                       PAGES2KB(hard_limit), PAGES2KB(driver_pages));
+       } else {
+               len += sprintf(
+                       page + len,
+                       "     ??? kB\n");
+       }
+
+       *eof = 1;
+       return len;
 }
 
 static struct notifier_block xenstore_notifier;
 
 static int __init balloon_init(void)
 {
-    unsigned long pfn;
-    struct page *page;
-
-    IPRINTK("Initialising balloon driver.\n");
-
-    current_pages = min(xen_start_info.nr_pages, max_pfn);
-    target_pages  = current_pages;
-    balloon_low   = 0;
-    balloon_high  = 0;
-    driver_pages  = 0UL;
-    hard_limit    = ~0UL;
-
-    init_timer(&balloon_timer);
-    balloon_timer.data = 0;
-    balloon_timer.function = balloon_alarm;
+       unsigned long pfn;
+       struct page *page;
+
+       IPRINTK("Initialising balloon driver.\n");
+
+       current_pages = min(xen_start_info.nr_pages, max_pfn);
+       target_pages  = current_pages;
+       balloon_low   = 0;
+       balloon_high  = 0;
+       driver_pages  = 0UL;
+       hard_limit    = ~0UL;
+
+       init_timer(&balloon_timer);
+       balloon_timer.data = 0;
+       balloon_timer.function = balloon_alarm;
     
-    if ( (balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL )
-    {
-        WPRINTK("Unable to create /proc/xen/balloon.\n");
-        return -1;
-    }
-
-    balloon_pde->read_proc  = balloon_read;
-    balloon_pde->write_proc = balloon_write;
+       if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
+               WPRINTK("Unable to create /proc/xen/balloon.\n");
+               return -1;
+       }
+
+       balloon_pde->read_proc  = balloon_read;
+       balloon_pde->write_proc = balloon_write;
     
-    /* Initialise the balloon with excess memory space. */
-    for ( pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++ )
-    {
-        page = &mem_map[pfn];
-        if ( !PageReserved(page) )
-            balloon_append(page);
-    }
-
-    target_watch.callback = watch_target;
-    xenstore_notifier.notifier_call = balloon_init_watcher;
-
-    register_xenstore_notifier(&xenstore_notifier);
+       /* Initialise the balloon with excess memory space. */
+       for (pfn = xen_start_info.nr_pages; pfn < max_pfn; pfn++) {
+               page = &mem_map[pfn];
+               if (!PageReserved(page))
+                       balloon_append(page);
+       }
+
+       target_watch.callback = watch_target;
+       xenstore_notifier.notifier_call = balloon_init_watcher;
+
+       register_xenstore_notifier(&xenstore_notifier);
     
-    return 0;
+       return 0;
 }
 
 subsys_initcall(balloon_init);
 
 void balloon_update_driver_allowance(long delta)
 {
-    unsigned long flags;
-    balloon_lock(flags);
-    driver_pages += delta; /* non-atomic update */
-    balloon_unlock(flags);
-}
-
-void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns)
-{
-    unsigned long flags;
-
-    balloon_lock(flags);
-    if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
-                               mfn_list, nr_mfns, 0) != nr_mfns )
-        BUG();
-    current_pages -= nr_mfns; /* non-atomic update */
-    balloon_unlock(flags);
-
-    schedule_work(&balloon_worker);
+       unsigned long flags;
+       balloon_lock(flags);
+       driver_pages += delta; /* non-atomic update */
+       balloon_unlock(flags);
+}
+
+struct page *balloon_alloc_empty_page_range(unsigned long nr_pages)
+{
+       int f(pte_t *pte, struct page *pte_page,
+             unsigned long addr, void *data)
+       {
+               unsigned long mfn = pte_mfn(*pte);
+               set_pte(pte, __pte_ma(0));
+               phys_to_machine_mapping[__pa(addr) >> PAGE_SHIFT] =
+                       INVALID_P2M_ENTRY;
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+               return 0;
+        }
+
+       unsigned long vstart, flags;
+       unsigned int  order = get_order(nr_pages * PAGE_SIZE);
+
+       vstart = __get_free_pages(GFP_KERNEL, order);
+       if (vstart == 0)
+               return NULL;
+
+       scrub_pages(vstart, 1 << order);
+
+       balloon_lock(flags);
+       BUG_ON(generic_page_range(
+               &init_mm, vstart, PAGE_SIZE << order, f, NULL) != 0);
+       current_pages -= 1UL << order;
+       balloon_unlock(flags);
+
+       schedule_work(&balloon_worker);
+
+       flush_tlb_all();
+
+       return virt_to_page(vstart);
+}
+
+void balloon_dealloc_empty_page_range(
+       struct page *page, unsigned long nr_pages)
+{
+       unsigned long i, flags;
+       unsigned int  order = get_order(nr_pages * PAGE_SIZE);
+
+       balloon_lock(flags);
+       for (i = 0; i < (1UL << order); i++)
+               balloon_append(page + i);
+       balloon_unlock(flags);
+
+       schedule_work(&balloon_worker);
 }
 
 EXPORT_SYMBOL(balloon_update_driver_allowance);
-EXPORT_SYMBOL(balloon_put_pages);
+EXPORT_SYMBOL(balloon_alloc_empty_page_range);
+EXPORT_SYMBOL(balloon_dealloc_empty_page_range);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c        Mon Aug 22 16:38:13 2005
@@ -569,6 +569,7 @@
 static int __init blkif_init(void)
 {
     int i;
+    struct page *page;
 
     if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
          !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
@@ -576,8 +577,9 @@
 
     blkif_interface_init();
 
-    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
-        BUG();
+    page = balloon_alloc_empty_page_range(MMAP_PAGES);
+    BUG_ON(page == NULL);
+    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 
     pending_cons = 0;
     pending_prod = MAX_PENDING_REQS;
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c  Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap_userdev.c  Mon Aug 22 16:38:13 2005
@@ -775,9 +775,11 @@
 int blktap_init(void)
 {
     int err, i, j;
-
-    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
-        BUG();
+    struct page *page;
+
+    page = balloon_alloc_empty_page_range(MMAP_PAGES);
+    BUG_ON(page == NULL);
+    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 
 #ifdef CONFIG_XEN_BLKDEV_GRANT
     for (i=0; i<MAX_PENDING_REQS ; i++)
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Mon Aug 22 16:38:13 2005
@@ -968,8 +968,9 @@
     
     netif_interface_init();
 
-    mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS);
-    BUG_ON(mmap_vstart == 0);
+    page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
+    BUG_ON(page == NULL);
+    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 
     for ( i = 0; i < MAX_PENDING_REQS; i++ )
     {
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c        Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/usbback/usbback.c        Mon Aug 22 16:38:13 2005
@@ -1027,13 +1027,15 @@
 static int __init usbif_init(void)
 {
     int i;
+    struct page *page;
 
     if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
          !(xen_start_info.flags & SIF_USB_BE_DOMAIN) )
         return 0;
-    
-    if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
-        BUG();
+
+    page = balloon_alloc_empty_page_range(MMAP_PAGES);
+    BUG_ON(page == NULL);
+    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 
     pending_cons = 0;
     pending_prod = MAX_PENDING_REQS;
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/include/asm-xen/balloon.h
--- a/linux-2.6-xen-sparse/include/asm-xen/balloon.h    Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/balloon.h    Mon Aug 22 16:38:13 2005
@@ -35,10 +35,19 @@
  * Inform the balloon driver that it should allow some slop for device-driver
  * memory activities.
  */
-extern void balloon_update_driver_allowance(long delta);
+extern void
+balloon_update_driver_allowance(
+       long delta);
 
-/* Give up unmapped pages to the balloon driver. */
-extern void balloon_put_pages(unsigned long *mfn_list, unsigned long nr_mfns);
+/* Allocate an empty low-memory page range. */
+extern struct page *
+balloon_alloc_empty_page_range(
+       unsigned long nr_pages);
+
+/* Deallocate an empty page range, adding to the balloon. */
+extern void
+balloon_dealloc_empty_page_range(
+       struct page *page, unsigned long nr_pages);
 
 /*
  * Prevent the balloon driver from changing the memory reservation during
diff -r 531ad4bde8f2 -r 2052ce3345c1 linux-2.6-xen-sparse/include/asm-xen/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-xen/hypervisor.h Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/hypervisor.h Mon Aug 22 16:38:13 2005
@@ -137,9 +137,6 @@
 void xen_create_contiguous_region(unsigned long vstart, unsigned int order);
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
 
-/* Allocate a contiguous empty region of low memory. Return virtual start. */
-unsigned long allocate_empty_lowmem_region(unsigned long pages);
-
 #include <asm/hypercall.h>
 
 #if defined(CONFIG_X86_64)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

