[Xen-devel] [PATCH 2/3 v3] xen-balloon: Add interface to retrieve ballooned pages

Pages that have been ballooned out are useful to other Xen drivers doing
grant table actions, because such pages have a valid struct page and PFN
but no backing MFN, so they are free to be remapped. Since these callers
do not care whether the pages are in lowmem, balloon_retrieve() gains a
prefer_highmem flag that lets them consume highmem pages first, leaving
lowmem pages in the balloon.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 drivers/xen/balloon.c |   61 ++++++++++++++++++++++++++++++++++++++++++++++--
 include/xen/balloon.h |    3 ++
 2 files changed, 61 insertions(+), 3 deletions(-)
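
For illustration only (not part of this patch; the mydrv_* names and the
error handling are invented), a user of the new interface might look
like this:

#include <linux/errno.h>
#include <xen/balloon.h>

#define MYDRV_NR_PAGES 16

static struct page *mydrv_pages[MYDRV_NR_PAGES];

static int mydrv_get_pages(void)
{
	/*
	 * Pull pages out of the balloon; alloc_xenballooned_pages()
	 * inflates the balloon further (via decrease_reservation) if
	 * the ballooned_pages list alone cannot satisfy the request.
	 */
	int got = alloc_xenballooned_pages(MYDRV_NR_PAGES, mydrv_pages);

	if (got < MYDRV_NR_PAGES) {
		/* Partial success: hand back whatever we did get. */
		free_xenballooned_pages(got, mydrv_pages);
		return -ENOMEM;
	}

	/*
	 * mydrv_pages[] now have valid struct pages and PFNs but no
	 * backing MFNs, so they are safe targets for grant mappings.
	 */
	return 0;
}

static void mydrv_put_pages(void)
{
	/* Return the pages; the balloon shrinks again if above target. */
	free_xenballooned_pages(MYDRV_NR_PAGES, mydrv_pages);
}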

diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b0a7a92..53deaec 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -116,14 +116,17 @@ static void balloon_append(struct page *page)
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(void)
+static struct page *balloon_retrieve(int prefer_highmem)
 {
        struct page *page;
 
        if (list_empty(&ballooned_pages))
                return NULL;
 
-       page = list_entry(ballooned_pages.next, struct page, lru);
+       if (prefer_highmem)
+               page = list_entry(ballooned_pages.prev, struct page, lru);
+       else
+               page = list_entry(ballooned_pages.next, struct page, lru);
        list_del(&page->lru);
 
        if (PageHighMem(page)) {
@@ -198,7 +201,7 @@ static int increase_reservation(unsigned long nr_pages)
                goto out;
 
        for (i = 0; i < rc; i++) {
-               page = balloon_retrieve();
+               page = balloon_retrieve(0);
                BUG_ON(page == NULL);
 
                pfn = page_to_pfn(page);
@@ -328,6 +331,58 @@ void balloon_set_new_target(unsigned long target)
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
+/**
+ * alloc_xenballooned_pages - get pages that have been ballooned out
+ * @nr_pages: Number of pages to get
+ * @pages: pages returned
+ * @return number of pages retrieved
+ */
+int alloc_xenballooned_pages(int nr_pages, struct page **pages)
+{
+       int rv = 0;
+       int alloc_failed = 0;
+       struct page *page;
+       mutex_lock(&balloon_mutex);
+       while (rv < nr_pages) {
+               page = balloon_retrieve(1);
+               if (page) {
+                       pages[rv++] = page;
+               } else {
+                       if (alloc_failed)
+                               break;
+                       if (decrease_reservation(nr_pages - rv))
+                               alloc_failed = 1;
+               }
+       }
+       mutex_unlock(&balloon_mutex);
+       return rv;
+}
+EXPORT_SYMBOL(alloc_xenballooned_pages);
+
+/**
+ * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void free_xenballooned_pages(int nr_pages, struct page **pages)
+{
+       int i;
+
+       mutex_lock(&balloon_mutex);
+
+       for (i = 0; i < nr_pages; i++) {
+               if (pages[i])
+                       balloon_append(pages[i]);
+       }
+
+       /* The balloon may be too large now. Shrink it if needed. */
+       if (current_target() != balloon_stats.current_pages)
+               schedule_work(&balloon_worker);
+
+       mutex_unlock(&balloon_mutex);
+}
+EXPORT_SYMBOL(free_xenballooned_pages);
+
 static int __init balloon_init(void)
 {
        unsigned long pfn, nr_pages, extra_pfn_end;
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index b2b7c21..72a6927 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -19,3 +19,6 @@ struct balloon_stats {
 extern struct balloon_stats balloon_stats;
 
 void balloon_set_new_target(unsigned long target);
+
+int alloc_xenballooned_pages(int nr_pages, struct page **pages);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
-- 
1.7.3.4

