
[Xen-changelog] [xen-unstable] [IA64] use page_list_head and related stuff.



# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1234491779 -32400
# Node ID af0da711bbdb5f9c631aeca85b21eaf1bfb3eddb
# Parent  c7cba853583da45ee4478237047fdd5d6bed68cd
[IA64] use page_list_head and related stuff.

Use page_list_head and the related helpers for consistency with x86 code.

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
---
 xen/arch/ia64/xen/domain.c       |   26 ++++++++++++++------------
 xen/arch/ia64/xen/mm.c           |    4 ++--
 xen/arch/ia64/xen/tlb_track.c    |    8 ++++----
 xen/include/asm-ia64/domain.h    |    3 ++-
 xen/include/asm-ia64/mm.h        |   15 ++++++++++++++-
 xen/include/asm-ia64/tlb_track.h |    2 +-
 6 files changed, 37 insertions(+), 21 deletions(-)
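
[Editor's note, not part of the changeset: the helpers this patch switches to
(INIT_PAGE_LIST_HEAD, page_list_add, page_list_add_tail, page_list_del,
page_list_remove_head, page_list_empty, page_list_for_each_safe) are the common
page-list interface from include/xen/mm.h that the x86 code already uses.
Below is a minimal, self-contained sketch of the drain-and-requeue pattern that
the new relinquish_memory() adopts, including the structure-copy-plus-reinit
idiom that replaces list_splice_init(). The struct layouts and helper bodies
here are simplified stand-ins written only for illustration; they are not the
actual Xen implementations.]

/*
 * Simplified stand-ins for the page_list_* helpers; the real definitions
 * live in xen/include/xen/mm.h.  Only the shape of the API matters here.
 */
#include <stdio.h>

struct page_info {
    struct page_info *next, *prev;   /* stand-in for struct page_list_entry */
    unsigned long mfn;               /* illustrative field only */
};

struct page_list_head {
    struct page_info *head, *tail;
};

#define INIT_PAGE_LIST_HEAD(l) do { (l)->head = (l)->tail = NULL; } while (0)

static int page_list_empty(const struct page_list_head *l)
{
    return l->head == NULL;
}

static void page_list_add_tail(struct page_info *pg, struct page_list_head *l)
{
    pg->next = NULL;
    pg->prev = l->tail;
    if ( l->tail )
        l->tail->next = pg;
    else
        l->head = pg;
    l->tail = pg;
}

static struct page_info *page_list_remove_head(struct page_list_head *l)
{
    struct page_info *pg = l->head;

    if ( !pg )
        return NULL;
    l->head = pg->next;
    if ( l->head )
        l->head->prev = NULL;
    else
        l->tail = NULL;
    pg->next = pg->prev = NULL;
    return pg;
}

int main(void)
{
    struct page_list_head list, relmem_list;
    struct page_info pages[4];
    struct page_info *page;
    unsigned int i;

    INIT_PAGE_LIST_HEAD(&list);
    INIT_PAGE_LIST_HEAD(&relmem_list);
    for ( i = 0; i < 4; i++ )
    {
        pages[i].mfn = i;
        page_list_add_tail(&pages[i], &list);
    }

    /* Drain 'list', parking each page on 'relmem_list' -- the same shape
     * as the new relinquish_memory() loop. */
    while ( (page = page_list_remove_head(&list)) )
        page_list_add_tail(page, &relmem_list);

    /* Equivalent of the old list_splice_init(): 'list' is empty here, so the
     * accumulated pages are handed back by structure copy, and relmem_list is
     * re-initialised. */
    if ( !page_list_empty(&relmem_list) )
    {
        list = relmem_list;
        INIT_PAGE_LIST_HEAD(&relmem_list);
    }

    while ( (page = page_list_remove_head(&list)) )
        printf("mfn %lu\n", page->mfn);
    return 0;
}

[The common header can define a compressed page_list_entry; as the #if 0
comment in the asm-ia64/mm.h hunk below notes, ia64 does not adopt the
compressed form for now, given IA64_MAX_PHYS_BITS and the page size.]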

diff -r c7cba853583d -r af0da711bbdb xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/arch/ia64/xen/domain.c        Fri Feb 13 11:22:59 2009 +0900
@@ -608,7 +608,7 @@ int arch_domain_create(struct domain *d,
        memset(&d->arch.mm, 0, sizeof(d->arch.mm));
        d->arch.relres = RELRES_not_started;
        d->arch.mm_teardown_offset = 0;
-       INIT_LIST_HEAD(&d->arch.relmem_list);
+       INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
 
        if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
            goto fail_nomem;
@@ -1626,9 +1626,8 @@ int arch_set_info_guest(struct vcpu *v, 
        return rc;
 }
 
-static int relinquish_memory(struct domain *d, struct list_head *list)
-{
-    struct list_head *ent;
+static int relinquish_memory(struct domain *d, struct page_list_head *list)
+{
     struct page_info *page;
 #ifndef __ia64__
     unsigned long     x, y;
@@ -1637,16 +1636,14 @@ static int relinquish_memory(struct doma
 
     /* Use a recursive lock, as we may enter 'free_domheap_page'. */
     spin_lock_recursive(&d->page_alloc_lock);
-    ent = list->next;
-    while ( ent != list )
+
+    while ( (page = page_list_remove_head(list)) )
     {
-        page = list_entry(ent, struct page_info, list);
         /* Grab a reference to the page so it won't disappear from under us. */
         if ( unlikely(!get_page(page, d)) )
         {
             /* Couldn't get a reference -- someone is freeing this page. */
-            ent = ent->next;
-            list_move_tail(&page->list, &d->arch.relmem_list);
+            page_list_add_tail(page, &d->arch.relmem_list);
             continue;
         }
 
@@ -1681,9 +1678,8 @@ static int relinquish_memory(struct doma
 #endif
 
         /* Follow the list chain and /then/ potentially free the page. */
-        ent = ent->next;
         BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
-        list_move_tail(&page->list, &d->arch.relmem_list);
+        page_list_add_tail(page, &d->arch.relmem_list);
         put_page(page);
 
         if (hypercall_preempt_check()) {
@@ -1692,7 +1688,13 @@ static int relinquish_memory(struct doma
         }
     }
 
-    list_splice_init(&d->arch.relmem_list, list);
+    /* list is empty at this point. */
+    if ( !page_list_empty(&d->arch.relmem_list) )
+    {
+        *list = d->arch.relmem_list;
+        INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
+    }
+
 
  out:
     spin_unlock_recursive(&d->page_alloc_lock);
diff -r c7cba853583d -r af0da711bbdb xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c    Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/arch/ia64/xen/mm.c    Fri Feb 13 11:22:59 2009 +0900
@@ -474,7 +474,7 @@ share_xen_page_with_guest(struct page_in
         page->count_info |= PGC_allocated | 1;
         if ( unlikely(d->xenheap_pages++ == 0) )
             get_knownalive_domain(d);
-        list_add_tail(&page->list, &d->xenpage_list);
+        page_list_add_tail(page, &d->xenpage_list);
     }
 
     // grant_table_destroy() releases these pages.
@@ -2856,7 +2856,7 @@ steal_page(struct domain *d, struct page
     /* Unlink from original owner. */
     if ( !(memflags & MEMF_no_refcount) )
         d->tot_pages--;
-    list_del(&page->list);
+    page_list_del(page, &d->page_list);
 
     spin_unlock(&d->page_alloc_lock);
     perfc_incr(steal_page);
diff -r c7cba853583d -r af0da711bbdb xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c     Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/arch/ia64/xen/tlb_track.c     Fri Feb 13 11:22:59 2009 +0900
@@ -56,7 +56,7 @@ tlb_track_allocate_entries(struct tlb_tr
         return -ENOMEM;
     }
 
-    list_add(&entry_page->list, &tlb_track->page_list);
+    page_list_add(entry_page, &tlb_track->page_list);
     track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
     allocated = PAGE_SIZE / sizeof(track_entries[0]);
     tlb_track->num_entries += allocated;
@@ -93,7 +93,7 @@ tlb_track_create(struct domain* d)
     tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
     tlb_track->num_entries = 0;
     tlb_track->num_free = 0;
-    INIT_LIST_HEAD(&tlb_track->page_list);
+    INIT_PAGE_LIST_HEAD(&tlb_track->page_list);
     if (tlb_track_allocate_entries(tlb_track) < 0)
         goto out;
 
@@ -136,8 +136,8 @@ tlb_track_destroy(struct domain* d)
     spin_lock(&tlb_track->free_list_lock);
     BUG_ON(tlb_track->num_free != tlb_track->num_entries);
 
-    list_for_each_entry_safe(page, next, &tlb_track->page_list, list) {
-        list_del(&page->list);
+    page_list_for_each_safe(page, next, &tlb_track->page_list) {
+        page_list_del(page, &tlb_track->page_list);
         free_domheap_page(page);
     }
 
diff -r c7cba853583d -r af0da711bbdb xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/include/asm-ia64/domain.h     Fri Feb 13 11:22:59 2009 +0900
@@ -10,6 +10,7 @@
 #include <asm/vmx_platform.h>
 #include <xen/list.h>
 #include <xen/cpumask.h>
+#include <xen/mm.h>
 #include <asm/fpswa.h>
 #include <xen/rangeset.h>
 
@@ -224,7 +225,7 @@ struct arch_domain {
     /* Continuable mm_teardown() */
     unsigned long mm_teardown_offset;
     /* Continuable domain_relinquish_resources() */
-    struct list_head relmem_list;
+    struct page_list_head relmem_list;
 };
 #define INT_ENABLE_OFFSET(v)             \
     (sizeof(vcpu_info_t) * (v)->vcpu_id + \
diff -r c7cba853583d -r af0da711bbdb xen/include/asm-ia64/mm.h
--- a/xen/include/asm-ia64/mm.h Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/include/asm-ia64/mm.h Fri Feb 13 11:22:59 2009 +0900
@@ -39,10 +39,23 @@ typedef unsigned long page_flags_t;
 
 #define PRtype_info "016lx"
 
+#if 0
+/*
+ * See include/xen/mm.h.
+ * For now, abandon to compress struct page_info
+ * seeing IA64_MAX_PHYS_BITS and page size.
+ */
+#undef page_list_entry
+struct page_list_entry
+{
+    unsigned long next, prev;
+};
+#endif
+
 struct page_info
 {
     /* Each frame can be threaded onto a doubly-linked list. */
-    struct list_head list;
+    struct page_list_entry list;
 
     /* Reference count and various PGC_xxx flags and fields. */
     unsigned long count_info;
diff -r c7cba853583d -r af0da711bbdb xen/include/asm-ia64/tlb_track.h
--- a/xen/include/asm-ia64/tlb_track.h  Fri Feb 13 11:22:28 2009 +0900
+++ b/xen/include/asm-ia64/tlb_track.h  Fri Feb 13 11:22:59 2009 +0900
@@ -72,7 +72,7 @@ struct tlb_track {
     unsigned int                limit;
     unsigned int                num_entries;
     unsigned int                num_free;
-    struct list_head            page_list;
+    struct page_list_head       page_list;
 
     /* XXX hash table size */
     spinlock_t                  hash_lock;
