
[Xen-changelog] [xen-unstable] [XEN][POWERPC] Lots of domain page management cleanups.



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 43ec7afa5734a8c2d8840c01e50255d7c96f5abd
# Parent  7825169895d0652dd16f10a9186cba5b84b6933e
[XEN][POWERPC] Lots of domain page management cleanups.
Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/domain.c           |   10 +
 xen/arch/powerpc/domain_build.c     |   32 +++--
 xen/arch/powerpc/iommu.c            |   17 +--
 xen/arch/powerpc/mm.c               |  200 +++++++++++++++++++++++++++++-------
 xen/arch/powerpc/papr/xlate.c       |    4 
 xen/arch/powerpc/powerpc64/ppc970.c |   11 +
 xen/include/asm-powerpc/domain.h    |    6 -
 xen/include/asm-powerpc/mm.h        |  179 +++++++++++++++++++-------------
 xen/include/asm-powerpc/processor.h |    3 
 9 files changed, 321 insertions(+), 141 deletions(-)

diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/domain.c Fri Aug 25 15:28:48 2006 -0400
@@ -27,6 +27,7 @@
 #include <xen/domain.h>
 #include <xen/console.h>
 #include <xen/shutdown.h>
+#include <xen/mm.h>
 #include <asm/htab.h>
 #include <asm/current.h>
 #include <asm/hcalls.h>
@@ -76,6 +77,7 @@ int arch_domain_create(struct domain *d)
     unsigned long rma_base;
     unsigned long rma_sz;
     uint htab_order;
+    uint nr_pages;
 
     if (d->domain_id == IDLE_DOMAIN_ID) {
         d->shared_info = (void *)alloc_xenheap_page();
@@ -88,11 +90,13 @@ int arch_domain_create(struct domain *d)
     rma_sz = rma_size(d->arch.rma_order);
 
     /* allocate the real mode area */
-    d->max_pages = 1UL << d->arch.rma_order;
+    nr_pages =  1UL << d->arch.rma_order;
+    d->max_pages = nr_pages;
     d->tot_pages = 0;
     d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
     if (NULL == d->arch.rma_page)
         return 1;
+
     rma_base = page_to_maddr(d->arch.rma_page);
 
     BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
@@ -103,8 +107,8 @@ int arch_domain_create(struct domain *d)
     d->shared_info = (shared_info_t *)
         (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
 
-    d->arch.large_page_sizes = 1;
-    d->arch.large_page_shift[0] = 24; /* 16 M for 970s */
+    d->arch.large_page_sizes = cpu_large_page_orders(
+        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));
 
     /* FIXME: we need to know the maximum addressable memory for this
      * domain to calculate this correctly. It should probably be set
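
The hard-coded 16M large-page shift gives way to a per-CPU query, and the
order (log2 of the size in pages) is stored instead of the shift. A minimal
sketch of how a consumer recovers the byte size from a stored order,
assuming PAGE_SHIFT == 12 as elsewhere in this patch:

    /* illustration: on a 970, cpu_large_page_orders() reports one size */
    uint order   = d->arch.large_page_order[0]; /* 12 on ppc970 */
    uint pgshift = order + PAGE_SHIFT;          /* 12 + 12 = 24 */
    ulong bytes  = 1UL << pgshift;              /* 16M          */
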
diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c   Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/domain_build.c   Fri Aug 25 15:28:48 2006 -0400
@@ -34,17 +34,21 @@ extern int loadelfimage_32(struct domain
 extern int loadelfimage_32(struct domain_setup_info *dsi);
 
 /* opt_dom0_mem: memory allocated to domain 0. */
-static unsigned int opt_dom0_mem;
+static unsigned int dom0_nrpages;
 static void parse_dom0_mem(char *s)
 {
-    unsigned long long bytes = parse_size_and_unit(s);
-    /* If no unit is specified we default to kB units, not bytes. */
-    if (isdigit(s[strlen(s)-1]))
-        opt_dom0_mem = (unsigned int)bytes;
-    else
-        opt_dom0_mem = (unsigned int)(bytes >> 10);
+    unsigned long long bytes;
+
+    bytes = parse_size_and_unit(s);
+    dom0_nrpages = bytes >> PAGE_SHIFT;
 }
 custom_param("dom0_mem", parse_dom0_mem);
+
+static unsigned int opt_dom0_max_vcpus;
+integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
+
+static unsigned int opt_dom0_shadow;
+boolean_param("dom0_shadow", opt_dom0_shadow);
 
 int elf_sanity_check(Elf_Ehdr *ehdr)
 {
@@ -146,8 +150,14 @@ int construct_dom0(struct domain *d,
 
     /* By default DOM0 is allocated all available memory. */
     d->max_pages = ~0U;
-    d->tot_pages = 1UL << d->arch.rma_order;
-
+
+    if (dom0_nrpages == 0) {
+        dom0_nrpages = 1UL << d->arch.rma_order;
+    }
+
+    d->tot_pages = dom0_nrpages;
+    ASSERT(d->tot_pages > 0);
+    
     ASSERT( image_len < rma_sz );
 
     si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
@@ -161,10 +171,6 @@ int construct_dom0(struct domain *d,
     printk("shared_info: 0x%lx,%p\n", si->shared_info, d->shared_info);
 
     eomem = si->shared_info;
-
-    /* allow dom0 to access all of system RAM */
-    d->arch.logical_base_pfn = 128 << (20 - PAGE_SHIFT); /* 128 MB */
-    d->arch.logical_end_pfn = max_page;
 
     /* number of pages accessible */
     si->nr_pages = rma_sz >> PAGE_SHIFT;
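
With dom0_mem now parsed straight into pages, the old kB-versus-bytes
guessing is gone. A worked example of the conversion, assuming a 4K page
(PAGE_SHIFT == 12):

    /* "dom0_mem=256M" -> parse_size_and_unit() yields 268435456 bytes, */
    /* and 268435456 >> PAGE_SHIFT == 65536 pages.                      */
    /* With no dom0_mem given, construct_dom0 falls back to one RMA:    */
    dom0_nrpages = 1UL << d->arch.rma_order;
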
diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/iommu.c
--- a/xen/arch/powerpc/iommu.c  Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/iommu.c  Fri Aug 25 15:28:48 2006 -0400
@@ -52,17 +52,14 @@ int iommu_put(u32 buid, ulong ioba, unio
 
         pfn = tce.tce_bits.tce_rpn;
         mfn = pfn2mfn(d, pfn, &mtype);
-        if (mtype != 0) {
-            panic("we don't do non-RMO memory yet\n");
+        if (mfn > 0) {
+#ifdef DEBUG
+            printk("%s: ioba=0x%lx pfn=0x%lx mfn=0x%lx\n", __func__,
+                   ioba, pfn, mfn);
+#endif
+            tce.tce_bits.tce_rpn = mfn;
+            return iommu_phbs[buid].iommu_put(ioba, tce);
         }
-
-#ifdef DEBUG
-        printk("%s: ioba=0x%lx pfn=0x%lx mfn=0x%lx\n", __func__,
-               ioba, pfn, mfn);
-#endif
-        tce.tce_bits.tce_rpn = mfn;
-
-        return iommu_phbs[buid].iommu_put(ioba, tce);
     }
     return -1;
 }
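
The panic on non-RMO memory is replaced by a translate-then-program flow:
the TCE is handed to the PHB only when pfn2mfn() returns a usable mfn, and
the existing -1 error return covers everything else. Condensed for
illustration:

    mfn = pfn2mfn(d, tce.tce_bits.tce_rpn, &mtype);
    if (mfn > 0) {
        tce.tce_bits.tce_rpn = mfn;                  /* real frame */
        return iommu_phbs[buid].iommu_put(ioba, tce);
    }
    return -1;                                       /* reject mapping */
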
diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/mm.c     Fri Aug 25 15:28:48 2006 -0400
@@ -13,9 +13,10 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
- * Copyright (C) IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005, 2006
  *
  * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
+ *          Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
  */
 
 #include <xen/config.h>
@@ -23,9 +24,18 @@
 #include <xen/shadow.h>
 #include <xen/kernel.h>
 #include <xen/sched.h>
+#include <xen/perfc.h>
 #include <asm/misc.h>
 #include <asm/init.h>
 #include <asm/page.h>
+
+#ifdef VERBOSE
+#define MEM_LOG(_f, _a...)                                  \
+  printk("DOM%u: (file=mm.c, line=%d) " _f "\n",            \
+         current->domain->domain_id , __LINE__ , ## _a )
+#else
+#define MEM_LOG(_f, _a...) ((void)0)
+#endif
 
 /* Frame table and its size in pages. */
 struct page_info *frame_table;
@@ -53,16 +63,128 @@ int steal_page(struct domain *d, struct 
     return 1;
 }
 
-
-int get_page_type(struct page_info *page, u32 type)
-{
-    panic("%s called\n", __func__);
-    return 1;
-}
-
 void put_page_type(struct page_info *page)
 {
-    panic("%s called\n", __func__);
+    unsigned long nx, x, y = page->u.inuse.type_info;
+
+    do {
+        x  = y;
+        nx = x - 1;
+
+        ASSERT((x & PGT_count_mask) != 0);
+
+        /*
+         * The page should always be validated while a reference is held. The 
+         * exception is during domain destruction, when we forcibly invalidate 
+         * page-table pages if we detect a referential loop.
+         * See domain.c:relinquish_list().
+         */
+        ASSERT((x & PGT_validated) || 
+               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
+
+        if ( unlikely((nx & PGT_count_mask) == 0) )
+        {
+            /* Record TLB information for flush later. */
+            page->tlbflush_timestamp = tlbflush_current_time();
+        }
+        else if ( unlikely((nx & (PGT_pinned|PGT_type_mask|PGT_count_mask)) == 
+                           (PGT_pinned | 1)) )
+        {
+            /* Page is now only pinned. Make the back pointer mutable again. */
+            nx |= PGT_va_mutable;
+        }
+    }
+    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
+}
+
+
+int get_page_type(struct page_info *page, unsigned long type)
+{
+    unsigned long nx, x, y = page->u.inuse.type_info;
+
+ again:
+    do {
+        x  = y;
+        nx = x + 1;
+        if ( unlikely((nx & PGT_count_mask) == 0) )
+        {
+            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
+            return 0;
+        }
+        else if ( unlikely((x & PGT_count_mask) == 0) )
+        {
+            if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
+            {
+                if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
+                {
+                    /*
+                     * On type change we check to flush stale TLB
+                     * entries. This may be unnecessary (e.g., page
+                     * was GDT/LDT) but those circumstances should be
+                     * very rare.
+                     */
+                    cpumask_t mask =
+                        page_get_owner(page)->domain_dirty_cpumask;
+                    tlbflush_filter(mask, page->tlbflush_timestamp);
+
+                    if ( unlikely(!cpus_empty(mask)) )
+                    {
+                        perfc_incrc(need_flush_tlb_flush);
+                        flush_tlb_mask(mask);
+                    }
+                }
+
+                /* We lose existing type, back pointer, and validity. */
+                nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
+                nx |= type;
+
+                /* No special validation needed for writable pages. */
+                /* Page tables and GDT/LDT need to be scanned for validity. */
+                if ( type == PGT_writable_page )
+                    nx |= PGT_validated;
+            }
+        }
+        else
+        {
+            if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
+            {
+                if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
+                {
+                    return 0;
+                }
+                else if ( (x & PGT_va_mask) == PGT_va_mutable )
+                {
+                    /* The va backpointer is mutable, hence we update it. */
+                    nx &= ~PGT_va_mask;
+                    nx |= type; /* we know the actual type is correct */
+                }
+                else if ( (type & PGT_va_mask) != PGT_va_mutable )
+                {
+                    ASSERT((type & PGT_va_mask) != (x & PGT_va_mask));
+
+                    /* This table is possibly mapped at multiple locations. */
+                    nx &= ~PGT_va_mask;
+                    nx |= PGT_va_unknown;
+                }
+            }
+            if ( unlikely(!(x & PGT_validated)) )
+            {
+                /* Someone else is updating validation of this page. Wait... */
+                while ( (y = page->u.inuse.type_info) == x )
+                    cpu_relax();
+                goto again;
+            }
+        }
+    }
+    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
+
+    if ( unlikely(!(nx & PGT_validated)) )
+    {
+    /* No one else is updating simultaneously. */
+        __set_bit(_PGT_validated, &page->u.inuse.type_info);
+    }
+
+    return 1;
 }
 
 void __init init_frametable(void)
@@ -107,44 +229,50 @@ extern void copy_page(void *dp, void *sp
     }
 }
 
+static int mfn_in_hole(ulong mfn)
+{
+    /* totally cheating */
+    if (mfn >= (0xf0000000UL >> PAGE_SHIFT) &&
+        mfn < (((1UL << 32) - 1) >> PAGE_SHIFT))
+        return 1;
+
+    return 0;
+}
+
 ulong pfn2mfn(struct domain *d, long pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
     ulong rma_size_mfn = 1UL << d->arch.rma_order;
-    ulong mfn;
-    int t;
 
     if (pfn < rma_size_mfn) {
-        mfn = pfn + rma_base_mfn;
-        t = PFN_TYPE_RMA;
-    } else if (pfn >= d->arch.logical_base_pfn &&
-               pfn < d->arch.logical_end_pfn) {
-        if (test_bit(_DOMF_privileged, &d->domain_flags)) {
-            /* This hack allows dom0 to map all memory, necessary to
-             * initialize domU state. */
-            mfn = pfn;
-        } else {
-            panic("we do not handle the logical area yet\n");
-            mfn = 0;
-        }
-
-        t = PFN_TYPE_LOGICAL;
-    } else {
-        /* don't know */
-        mfn = pfn;
-        t = PFN_TYPE_IO;
-    }
-
-    if (type != NULL)
-        *type = t;
-
-    return mfn;
+        if (type)
+            *type = PFN_TYPE_RMA;
+        return pfn + rma_base_mfn;
+    }
+
+    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
+        mfn_in_hole(pfn)) {
+        if (type)
+            *type = PFN_TYPE_IO;
+        return pfn;
+    }
+
+    /* This hack allows dom0 to map all memory, necessary to
+     * initialize domU state. */
+    if (test_bit(_DOMF_privileged, &d->domain_flags)) {
+        if (type)
+            *type = PFN_TYPE_REMOTE;
+        return pfn;
+    }
+
+    BUG();
+    return 0;
 }
 
 void guest_physmap_add_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn)
 {
-    panic("%s\n", __func__);
+    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
 }
 void guest_physmap_remove_page(
     struct domain *d, unsigned long gpfn, unsigned long mfn)
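
The new get_page_type() and put_page_type() follow Xen's lock-free retry
idiom: snapshot type_info, compute its successor, and publish it with
cmpxchg, recomputing whenever another CPU races in. Stripped to the bare
pattern (illustrative only, without the PGT bookkeeping above):

    unsigned long nx, x, y = page->u.inuse.type_info;
    do {
        x  = y;        /* value we try to replace                */
        nx = x + 1;    /* successor; here just bumping the count */
    } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
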
diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/papr/xlate.c     Fri Aug 25 15:28:48 2006 -0400
@@ -154,13 +154,13 @@ static void h_enter(struct cpu_user_regs
         }
 
         /* get correct pgshift value */
-        pgshift = d->arch.large_page_shift[lp_size];
+        pgshift = d->arch.large_page_order[lp_size] + PAGE_SHIFT;
     }
 
     /* get the correct logical RPN in terms of 4K pages need to mask
      * off lp bits and unused arpn bits if this is a large page */
 
-    lpn = ~0ULL << (pgshift - 12);
+    lpn = ~0ULL << (pgshift - PAGE_SHIFT);
     lpn = pte.bits.rpn & lpn;
 
     rpn = pfn2mfn(d, lpn, &mtype);
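
Working the mask through for the one size a 970 reports: order 12 gives
pgshift 24, so ~0ULL << (pgshift - PAGE_SHIFT) == ~0ULL << 12, which clears
the low 12 bits of the rpn, i.e. the 4096 4K frames that make up one 16M
page:

    /* worked example, PAGE_SHIFT == 12, 16M large page */
    lpn = pte.bits.rpn & (~0ULL << (24 - 12));
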
diff -r 7825169895d0 -r 43ec7afa5734 xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c       Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/ppc970.c       Fri Aug 25 15:28:48 2006 -0400
@@ -40,6 +40,17 @@ unsigned int cpu_rma_order(void)
     uint rma_log_size = 6 + 20; /* (1 << 6) == 64 */
     return rma_log_size - PAGE_SHIFT;
 }
+
+unsigned int cpu_large_page_orders(uint *sizes, uint max)
+{
+    uint lp_log_size = 4 + 20; /* (1 << 4) == 16M */
+    if (max < 1)
+        return 0;
+
+    sizes[0] = lp_log_size - PAGE_SHIFT;
+
+    return 1;
+}    
 
 void cpu_initialize(int cpuid)
 {
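
cpu_large_page_orders() fills the caller's array and returns the number of
entries written, which is what arch_domain_create stores in
large_page_sizes. A hedged usage sketch:

    uint orders[4];   /* mirrors arch_domain.large_page_order */
    uint n = cpu_large_page_orders(orders, ARRAY_SIZE(orders));
    /* on a 970: n == 1, orders[0] == 24 - PAGE_SHIFT == 12 (16M) */
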
diff -r 7825169895d0 -r 43ec7afa5734 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h  Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/include/asm-powerpc/domain.h  Fri Aug 25 15:28:48 2006 -0400
@@ -38,15 +38,11 @@ struct arch_domain {
     struct page_info *rma_page;
     uint rma_order;
 
-    /* This is regular memory, only available thru translataion */
-    ulong logical_base_pfn;
-    ulong logical_end_pfn;
-
     /* I/O-port access bitmap mask. */
     u8 *iobmp_mask;       /* Address of IO bitmap mask, or NULL.      */
 
     uint large_page_sizes;
-    char large_page_shift[4];
+    uint large_page_order[4];
 } __cacheline_aligned;
 
 struct slb_entry {
diff -r 7825169895d0 -r 43ec7afa5734 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/include/asm-powerpc/mm.h      Fri Aug 25 15:28:48 2006 -0400
@@ -24,6 +24,7 @@
 #include <public/xen.h>
 #include <xen/list.h>
 #include <xen/types.h>
+#include <xen/mm.h>
 #include <asm/misc.h>
 #include <asm/system.h>
 #include <asm/flushtlb.h>
@@ -33,7 +34,6 @@
 #define memguard_unguard_range(_p,_l)    ((void)0)
 
 extern unsigned long xenheap_phys_end;
-#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
 
 /*
  * Per-page-frame information.
@@ -43,7 +43,6 @@ extern unsigned long xenheap_phys_end;
  *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
  */
 #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
-#define PRtype_info "016lx"
 
 /* XXX copy-and-paste job; re-examine me */
 struct page_info
@@ -63,7 +62,7 @@ struct page_info
         /* Page is in use: ((count_info & PGC_count_mask) != 0). */
         struct {
             /* Owner of this page (NULL if page is anonymous). */
-            struct domain *_domain;
+            u32 _domain;
             /* Type reference count and various PGT_xxx flags and fields. */
             unsigned long type_info;
         } inuse;
@@ -80,80 +79,132 @@ struct page_info
 
 };
 
+struct page_extents {
+    /* Each frame can be threaded onto a doubly-linked list. */
+    struct list_head pe_list;
+
+    /* page extent */
+    struct page_info *pg;
+    uint order;
+    ulong pfn;
+};
+
  /* The following page types are MUTUALLY EXCLUSIVE. */
 #define PGT_none            (0<<29) /* no special uses of this page */
-#define PGT_l1_page_table   (1<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table   (2<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table   (3<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table   (4<<29) /* using this page as an L4 page table? */
-#define PGT_gdt_page        (5<<29) /* using this page in a GDT? */
-#define PGT_ldt_page        (6<<29) /* using this page in an LDT? */
+#define PGT_RMA             (1<<29) /* This page is an RMA page? */
 #define PGT_writable_page   (7<<29) /* has writable mappings of this page? */
 #define PGT_type_mask       (7<<29) /* Bits 29-31. */
+
+ /* Owning guest has pinned this page to its current type? */
+#define _PGT_pinned         28
+#define PGT_pinned          (1U<<_PGT_pinned)
  /* Has this page been validated for use as its current type? */
-#define _PGT_validated      28
+#define _PGT_validated      27
 #define PGT_validated       (1U<<_PGT_validated)
- /* Owning guest has pinned this page to its current type? */
-#define _PGT_pinned         27
-#define PGT_pinned          (1U<<_PGT_pinned)
- /* The 10 most significant bits of virt address if this is a page table. */
-#define PGT_va_shift        17
-#define PGT_va_mask         (((1U<<10)-1)<<PGT_va_shift)
+
+ /* The 28 most significant bits of virt address if this is a page table. */
+#define PGT_va_shift        32
+#define PGT_va_mask         ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
  /* Is the back pointer still mutable (i.e. not fixed yet)? */
-#define PGT_va_mutable      (((1U<<10)-1)<<PGT_va_shift)
+#define PGT_va_mutable      ((unsigned long)((1U<<28)-1)<<PGT_va_shift)
  /* Is the back pointer unknown (e.g., p.t. is mapped at multiple VAs)? */
-#define PGT_va_unknown      (((1U<<10)-2)<<PGT_va_shift)
- /* 17-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1U<<17)-1)
+#define PGT_va_unknown      ((unsigned long)((1U<<28)-2)<<PGT_va_shift)
+
+ /* 16-bit count of uses of this frame as its current type. */
+#define PGT_count_mask      ((1U<<16)-1)
 
  /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated      31
 #define PGC_allocated       (1U<<_PGC_allocated)
- /* 31-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<31)-1)
+ /* Set on a *guest* page to mark it out-of-sync with its shadow */
+#define _PGC_out_of_sync     30
+#define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
+ /* Set when a page is in use as a page table */
+#define _PGC_page_table      29
+#define PGC_page_table      (1U<<_PGC_page_table)
+ /* 29-bit count of references to this frame. */
+#define PGC_count_mask      ((1U<<29)-1)
+
+#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
+
+static inline struct domain *unpickle_domptr(u32 _domain)
+{ return ((_domain == 0) || (_domain & 1)) ? NULL : __va(_domain); }
+
+static inline u32 pickle_domptr(struct domain *domain)
+{ return (domain == NULL) ? 0 : (u32)__pa(domain); }
+
+#define PRtype_info "016lx"/* should only be used for printk's */
+
+#define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
+#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+
+extern struct page_info *frame_table;
+extern unsigned long max_page;
+extern unsigned long total_pages;
+void init_frametable(void);
 
 static inline void put_page(struct page_info *page)
 {
-#if 0
-    int count;
-
-    count = atomic_dec_return(&page->count_info);
-
-    if ( unlikely((count & PGC_count_mask) == 0) )
+    u32 nx, x, y = page->count_info;
+
+    do {
+        x  = y;
+        nx = x - 1;
+    }
+    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
+
+    if ( unlikely((nx & PGC_count_mask) == 0) ) {
+        panic("about to free page\n");
         free_domheap_page(page);
-#else
-    trap();
-#endif
+    }
 }
 
 static inline int get_page(struct page_info *page,
                            struct domain *domain)
 {
-#if 0
-    int count;
-
-    count = atomic_inc_return(&page->count_info);
-
-    if (((count & PGC_count_mask) == 0) ||      /* Count overflow? */
-            ((count & PGC_count_mask) == 1) ||  /* Wasn't allocated? */
-            ((page->domain != domain)))         /* Wrong owner? */
-    {
-        atomic_dec(&page->count_info);
-        return 0;
-    }
-
-#else
-    trap();
-#endif
+    u32 x, nx, y = page->count_info;
+    u32 d, nd = page->u.inuse._domain;
+    u32 _domain = pickle_domptr(domain);
+
+    do {
+        x  = y;
+        nx = x + 1;
+        d  = nd;
+        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
+             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
+             unlikely(d != _domain) )                /* Wrong owner? */
+        {
+            return 0;
+        }
+        y = cmpxchg(&page->count_info, x, nx);
+    }
+    while ( unlikely(y != x) );
+
     return 1;
+}
+
+extern void put_page_type(struct page_info *page);
+extern int  get_page_type(struct page_info *page, unsigned long type);
+
+static inline void put_page_and_type(struct page_info *page)
+{
+    put_page_type(page);
+    put_page(page);
 }
 
 static inline int get_page_and_type(struct page_info *page,
                                     struct domain *domain,
-                                    u32 type)
-{
-    trap();
-    return 1;
+                                    unsigned long type)
+{
+    int rc = get_page(page, domain);
+
+    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
+    {
+        put_page(page);
+        rc = 0;
+    }
+
+    return rc;
 }
 
 static inline int page_is_removable(struct page_info *page)
@@ -161,16 +212,9 @@ static inline int page_is_removable(stru
     return ((page->count_info & PGC_count_mask) == 1);
 }
 
-int get_page_type(struct page_info *page, u32 type);
-
 #define set_machinetophys(_mfn, _pfn) (trap(), 0)
 
 extern void synchronise_pagetables(unsigned long cpu_mask);
-
-static inline void put_page_and_type(struct page_info *page)
-{
-    trap();
-}
 
 /* XXX don't know what this is for */
 typedef struct {
@@ -179,17 +223,10 @@ typedef struct {
 } vm_assist_info_t;
 extern vm_assist_info_t vm_assist_info[];
 
-#define page_get_owner(_p)    ((_p)->u.inuse._domain)
-#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = _d)
-
 #define share_xen_page_with_guest(p, d, r) do { } while (0)
 #define share_xen_page_with_privileged_guests(p, r) do { } while (0)
 
-extern struct page_info *frame_table;
 extern unsigned long frame_table_size;
-extern unsigned long max_page;
-extern unsigned long total_pages;
-void init_frametable(void);
 
 /* hope that accesses to this will fail spectacularly */
 #define machine_to_phys_mapping ((u32 *)-1UL)
@@ -199,12 +236,12 @@ extern int update_grant_va_mapping(unsig
                                    struct domain *,
                                    struct vcpu *);
 
-extern void put_page_type(struct page_info *page);
-
-#define PFN_TYPE_RMA 0
-#define PFN_TYPE_LOGICAL 1
-#define PFN_TYPE_IO 2
-extern ulong pfn2mfn(struct domain *d, long mfn, int *type);
+#define PFN_TYPE_RMA 1
+#define PFN_TYPE_LOGICAL 2
+#define PFN_TYPE_IO 3
+#define PFN_TYPE_REMOTE 4
+
+extern ulong pfn2mfn(struct domain *d, long pfn, int *type);
 
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
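
Page owners are now stored pickled, as a 32-bit physical offset whose zero
and odd encodings mean "no owner", mirroring x86. The composed helpers pair
the general reference with the type reference; a minimal usage sketch,
assuming a frame being mapped writable on a domain's behalf:

    if ( get_page_and_type(page, d, PGT_writable_page) )
    {
        /* ... use the frame ... */
        put_page_and_type(page);  /* drop type ref, then general ref */
    }
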
diff -r 7825169895d0 -r 43ec7afa5734 xen/include/asm-powerpc/processor.h
--- a/xen/include/asm-powerpc/processor.h       Fri Aug 25 14:34:51 2006 -0400
+++ b/xen/include/asm-powerpc/processor.h       Fri Aug 25 15:28:48 2006 -0400
@@ -40,7 +40,8 @@ extern void show_registers(struct cpu_us
 extern void show_registers(struct cpu_user_regs *);
 extern void show_execution_state(struct cpu_user_regs *);
 extern void show_backtrace(ulong sp, ulong lr, ulong pc);
-extern unsigned int cpu_rma_order(void);
+extern uint cpu_rma_order(void);
+extern uint cpu_large_page_orders(uint *sizes, uint max);
 extern void cpu_initialize(int cpuid);
 extern void cpu_init_vcpu(struct vcpu *);
 extern void save_cpu_sprs(struct vcpu *);
