
[Xen-changelog] [xen-unstable] [POWERPC][XEN] Safety with foreign get_page() calls and RMA



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 0cdac06f1a9dfeec0ce16daac562c68669934033
# Parent  9bf0fc041e14494796329f001df9b3e715243db9
[POWERPC][XEN] Safety with foreign get_page() calls and RMA

The following patch deals with get_page() possibly failing for H_ENTER
on a foreign page, returning the correct error when it does.  We also
tag RMA pages and check them in put_page(), which will panic (for now)
if an RMA page is freed while the domain is _not_ dying.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/mm.c         |   20 ++++++++++++--
 xen/arch/powerpc/papr/xlate.c |   57 +++++++++++++++++++++++-----------------
 xen/include/asm-powerpc/mm.h  |   10 ++++++---
 3 files changed, 59 insertions(+), 28 deletions(-)
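
Before the diff itself, a minimal sketch of the take-references-up-front,
unwind-on-failure pattern that h_enter() adopts below.  The structs and the
refcounting stubs here are hypothetical stand-ins, not Xen's real
get_domain()/get_page() prototypes, and the H_Rescinded value is
illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for Xen's refcounting (illustration only). */
    struct domain    { int refcnt; bool dying; };
    struct page_info { int count_info; struct domain *owner; };

    static bool get_domain(struct domain *d)
    {
        if (d->dying)
            return false;      /* real Xen checks for a zero refcount */
        d->refcnt++;
        return true;
    }
    static void put_domain(struct domain *d) { d->refcnt--; }

    static bool get_page(struct page_info *pg, struct domain *f)
    {
        if (pg->owner != f)
            return false;      /* page was rescinded or reassigned */
        pg->count_info++;
        return true;
    }
    static void put_page(struct page_info *pg) { pg->count_info--; }

    enum { H_Success = 0, H_Rescinded = -38 /* illustrative value */ };

    /* The shape h_enter() takes after the patch: take both references
     * before touching the PTE, and unwind in reverse order on every
     * failure path. */
    static int map_foreign(struct domain *f, struct page_info *pg)
    {
        if (!get_domain(f))
            return H_Rescinded;
        if (!get_page(pg, f)) {
            put_domain(f);     /* undo the domain reference */
            return H_Rescinded;
        }
        /* ... PTE insertion goes here; on a full PTEG the caller must
         * drop both references again, exactly as the patch does ... */
        return H_Success;
    }

    int main(void)
    {
        struct domain f = { .refcnt = 1, .dying = false };
        struct page_info pg = { .count_info = 1, .owner = &f };
        printf("map_foreign: %d\n", map_foreign(&f, &pg));
        return 0;
    }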

diff -r 9bf0fc041e14 -r 0cdac06f1a9d xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Thu Sep 14 01:36:39 2006 -0400
+++ b/xen/arch/powerpc/mm.c     Thu Sep 14 01:41:13 2006 -0400
@@ -287,6 +287,7 @@ int allocate_rma(struct domain *d, unsig
     struct vcpu *v;
     ulong rma_base;
     ulong rma_sz;
+    int i;
 
     if (d->arch.rma_page)
         return -EINVAL;
@@ -301,11 +302,17 @@ int allocate_rma(struct domain *d, unsig
 
     rma_base = page_to_maddr(d->arch.rma_page);
     rma_sz = rma_size(d->arch.rma_order);
+
     BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
 
-    /* XXX shouldn't be needed */
-    printk("clearing RMA: 0x%lx[0x%lx]\n", rma_base, rma_sz);
-    memset((void *)rma_base, 0, rma_sz);
+    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
+           d->domain_id, rma_base, rma_sz);
+
+    for (i = 0; i < (1 << d->arch.rma_order); i++) {
+        /* tag the page as RMA so put_page() can catch stray frees, then clear it */
+        d->arch.rma_page[i].count_info |= PGC_page_RMA;
+        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
+    }
 
     d->shared_info = (shared_info_t *)
         (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
@@ -318,6 +325,13 @@ int allocate_rma(struct domain *d, unsig
 
     return 0;
 }
+
+void free_rma_check(struct page_info *page)
+{
+    if (test_bit(_PGC_page_RMA, &page->count_info) &&
+        !test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags))
+        panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
+}
 
 ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
diff -r 9bf0fc041e14 -r 0cdac06f1a9d xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Thu Sep 14 01:36:39 2006 -0400
+++ b/xen/arch/powerpc/papr/xlate.c     Thu Sep 14 01:41:13 2006 -0400
@@ -123,6 +123,8 @@ static void h_enter(struct cpu_user_regs
     struct vcpu *v = get_current();
     struct domain *d = v->domain;
     int mtype;
+    struct page_info *pg = NULL;
+    struct domain *f = NULL;
 
     htab = &d->arch.htab;
     if (ptex > (1UL << htab->log_num_ptes)) {
@@ -203,15 +206,39 @@ static void h_enter(struct cpu_user_regs
     pte.bits.ts = 0x0;
     pte.bits.res2 = 0x0;
 
+    if (mtype == PFN_TYPE_FOREIGN) {
+        pg = mfn_to_page(mfn);
+        f = page_get_owner(pg);
+
+        BUG_ON(f == d);
+
+        if (unlikely(!get_domain(f))) {
+            regs->gprs[3] = H_Rescinded;
+            return;
+        }
+        if (unlikely(!get_page(pg, f))) {
+            put_domain(f);
+            regs->gprs[3] = H_Rescinded;
+            return;
+        }
+    }
+
     if ( !(flags & H_EXACT) ) {
         /* PTEG (not specific PTE); clear 3 lowest bits */
         ptex &= ~0x7UL;
         limit = 7;
     }
 
-        /* data manipulations should be done prior to the pte insertion. */
+    /* data manipulations should be done prior to the pte insertion. */
     if ( flags & H_ZERO_PAGE ) {
-        memset((void *)(mfn << PAGE_SHIFT), 0, 1UL << pgshift);
+        ulong addr = mfn << PAGE_SHIFT;             /* avoid shadowing 'pg' above */
+        ulong pgs = 1UL << (pgshift - PAGE_SHIFT);  /* mapping size in pages, not bytes */
+
+        while (pgs > 0) {
+            clear_page((void *)addr);
+            addr += PAGE_SIZE;
+            --pgs;
+        }
     }
 
     if ( flags & H_ICACHE_INVALIDATE ) {
@@ -252,27 +279,6 @@ static void h_enter(struct cpu_user_regs
             regs->gprs[3] = H_Success;
             regs->gprs[4] = idx;
 
-            
-            switch (mtype) {
-            case PFN_TYPE_IO:
-                break;
-            case PFN_TYPE_FOREIGN:
-            {
-                struct page_info *pg = mfn_to_page(mfn);
-                struct domain *f = page_get_owner(pg);
-
-                BUG_ON(f == d);
-                get_domain(f);
-                get_page(pg, f);
-            }
-                break;
-            case PFN_TYPE_RMA:
-            case PFN_TYPE_LOGICAL:
-                break;
-            default:
-                BUG();
-            }
-
             return;
         }
     }
@@ -281,6 +287,12 @@ static void h_enter(struct cpu_user_regs
     /* If the PTEG is full then no additional values are returned. */
     printk("%s: PTEG FULL\n", __func__);
 #endif
+
+    if (pg != NULL)
+        put_page(pg);
+
+    if (f != NULL)
+        put_domain(f);
 
     regs->gprs[3] = H_PTEG_Full;
 }
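
A quick check of the H_ZERO_PAGE arithmetic above: the loop steps through the
mapping one PAGE_SIZE at a time, so the iteration count must be the mapping
size in pages, i.e. (1UL << pgshift) bytes divided by PAGE_SIZE.  A standalone
sanity check of that arithmetic (a PAGE_SHIFT of 12 and a 16M large page are
assumed for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        /* e.g. a 4K base page (pgshift == PAGE_SHIFT) and a 16M large
         * page (pgshift == 24) */
        for (unsigned pgshift = PAGE_SHIFT; pgshift <= 24; pgshift += 12) {
            unsigned long bytes = 1UL << pgshift;
            unsigned long pgs   = 1UL << (pgshift - PAGE_SHIFT);
            assert(pgs * PAGE_SIZE == bytes);  /* loop covers the mapping exactly */
            printf("pgshift=%u: %lu bytes, %lu clear_page() calls\n",
                   pgshift, bytes, pgs);
        }
        return 0;
    }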
diff -r 9bf0fc041e14 -r 0cdac06f1a9d xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Thu Sep 14 01:36:39 2006 -0400
+++ b/xen/include/asm-powerpc/mm.h      Thu Sep 14 01:41:13 2006 -0400
@@ -122,8 +122,11 @@ struct page_extents {
  /* Set when is using a page as a page table */
 #define _PGC_page_table      29
 #define PGC_page_table      (1U<<_PGC_page_table)
+/* Set when using page for RMA */
+#define _PGC_page_RMA      28
+#define PGC_page_RMA      (1U<<_PGC_page_RMA)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<29)-1)
+ /* 28-bit count of references to this frame. */
+#define PGC_count_mask      ((1U<<28)-1)
 
 #define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
 
@@ -142,6 +145,7 @@ extern unsigned long max_page;
 extern unsigned long max_page;
 extern unsigned long total_pages;
 void init_frametable(void);
+void free_rma_check(struct page_info *page);
 
 static inline void put_page(struct page_info *page)
 {
@@ -154,6 +158,8 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) ) {
+        /* RMA pages can only be released while the domain is dying */
+        free_rma_check(page);
         free_domheap_page(page);
     }
 }
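
To make the mm.h change concrete: the new flag steals bit 28 of count_info,
which is why PGC_count_mask has to shrink from 29 to 28 bits in the same
patch.  A standalone sketch of the packing, with the constants copied from
the hunk above:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _PGC_page_table 29
    #define PGC_page_table  (1U << _PGC_page_table)
    #define _PGC_page_RMA   28                /* new in this patch */
    #define PGC_page_RMA    (1U << _PGC_page_RMA)
    #define PGC_count_mask  ((1U << 28) - 1)  /* was (1U << 29) - 1 */

    int main(void)
    {
        uint32_t count_info = PGC_page_RMA | 5;   /* RMA page, refcount 5 */

        /* Flag bits must stay disjoint from the count bits; this is the
         * invariant the narrowed mask preserves. */
        assert((PGC_page_RMA   & PGC_count_mask) == 0);
        assert((PGC_page_table & PGC_count_mask) == 0);

        printf("refcount = %u, is_rma = %d\n",
               count_info & PGC_count_mask,
               !!(count_info & PGC_page_RMA));
        return 0;
    }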

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog