
[Xen-changelog] [xen-unstable] [POWERPC][XEN] More Robust Memory Checking



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID a817acb393863d9158c42285d46497925c28ef81
# Parent  ccdaa3ea71a7670b9d4e801d335037e0062e449f
[POWERPC][XEN] More Robust Memory Checking

This patch allows the platform to define the "IO hole" in its
addressable range and checks that even Dom0 does not try to map
"remote" memory that is not there.  It also replaces some panic()
calls with WARN() and returns failure instead.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
 xen/arch/powerpc/mm.c               |   17 ++++-------------
 xen/arch/powerpc/powerpc64/ppc970.c |   13 +++++++++++++
 xen/arch/powerpc/usercopy.c         |   10 ++++++++--
 xen/include/asm-powerpc/mm.h        |    2 +-
 xen/include/asm-powerpc/processor.h |    1 +
 5 files changed, 27 insertions(+), 16 deletions(-)

diff -r ccdaa3ea71a7 -r a817acb39386 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Tue Sep 05 15:25:06 2006 -0400
+++ b/xen/arch/powerpc/mm.c     Thu Sep 07 01:30:12 2006 -0400
@@ -229,16 +229,6 @@ extern void copy_page(void *dp, void *sp
     }
 }
 
-static int mfn_in_hole(ulong mfn)
-{
-    /* totally cheating */
-    if (mfn >= (0xf0000000UL >> PAGE_SHIFT) &&
-        mfn < (((1UL << 32) - 1) >> PAGE_SHIFT))
-        return 1;
-
-    return 0;
-}
-
 static uint add_extent(struct domain *d, struct page_info *pg, uint order)
 {
     struct page_extents *pe;
@@ -339,7 +329,7 @@ int allocate_rma(struct domain *d, unsig
     return 0;
 }
 
-ulong pfn2mfn(struct domain *d, long pfn, int *type)
+ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
 {
     ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
     ulong rma_size_mfn = 1UL << d->arch.rma_order;
@@ -356,7 +346,7 @@ ulong pfn2mfn(struct domain *d, long pfn
     }
 
     if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-        mfn_in_hole(pfn)) {
+        cpu_io_mfn(pfn)) {
         if (type)
             *type = PFN_TYPE_IO;
         return pfn;
@@ -374,7 +364,8 @@ ulong pfn2mfn(struct domain *d, long pfn
 
     /* This hack allows dom0 to map all memory, necessary to
      * initialize domU state. */
-    if (test_bit(_DOMF_privileged, &d->domain_flags)) {
+    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
+        pfn < max_page) {
         if (type)
             *type = PFN_TYPE_REMOTE;
         return pfn;
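
For illustration, here is a minimal standalone sketch of the gating that
pfn2mfn() now applies on the privileged-domain path: a frame is handed back
as IO only if the platform's hole predicate accepts it, and as a remote
mapping only if it actually lies below max_page.  The helper name
io_hole_contains() and the sample max_page value are placeholders, not Xen
identifiers, and the sketch skips the RMA and extent lookups that run before
these checks in the real function.

    #include <stdio.h>

    typedef unsigned long ulong;

    #define PAGE_SHIFT 12  /* assumed 4 KiB pages */

    /* Placeholder for the platform hook cpu_io_mfn(): does this frame
     * fall inside the IO hole?  Range matches the ppc970 version. */
    static int io_hole_contains(ulong mfn)
    {
        return mfn >= (2UL << (30 - PAGE_SHIFT)) &&   /* 2 GiB */
               mfn <  (4UL << (30 - PAGE_SHIFT));     /* 4 GiB */
    }

    /* Simplified shape of the privileged-domain checks in pfn2mfn(). */
    static const char *classify(ulong pfn, ulong max_page)
    {
        if (io_hole_contains(pfn))
            return "IO";        /* PFN_TYPE_IO */
        if (pfn < max_page)
            return "remote";    /* PFN_TYPE_REMOTE: dom0 touching domU memory */
        return "invalid";       /* no longer treated as remote */
    }

    int main(void)
    {
        ulong max_page = 1UL << (31 - PAGE_SHIFT);  /* pretend 2 GiB of RAM */
        ulong samples[] = { 0x1000UL, 2UL << (30 - PAGE_SHIFT),
                            ~0UL >> PAGE_SHIFT };

        for (int i = 0; i < 3; i++)
            printf("mfn 0x%lx -> %s\n", samples[i],
                   classify(samples[i], max_page));
        return 0;
    }
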
diff -r ccdaa3ea71a7 -r a817acb39386 xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c       Tue Sep 05 15:25:06 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/ppc970.c       Thu Sep 07 01:30:12 2006 -0400
@@ -88,6 +88,19 @@ unsigned int cpu_extent_order(void)
     return log_large_page_sizes[0] - PAGE_SHIFT;
 }
 
+
+/* This is more a platform thing than a CPU thing, but we only have
+ * one platform now */
+int cpu_io_mfn(ulong mfn)
+{
+    /* totally cheating */
+    if (mfn >= (2UL << (30 - PAGE_SHIFT)) && /* 2GiB */
+        mfn < (4UL << (30 - PAGE_SHIFT)))    /* 4GiB */
+        return 1;
+
+    return 0;
+}
+
 static u64 cpu0_hids[6];
 static u64 cpu0_hior;
 
diff -r ccdaa3ea71a7 -r a817acb39386 xen/arch/powerpc/usercopy.c
--- a/xen/arch/powerpc/usercopy.c       Tue Sep 05 15:25:06 2006 -0400
+++ b/xen/arch/powerpc/usercopy.c       Thu Sep 07 01:30:12 2006 -0400
@@ -56,15 +56,21 @@ static unsigned long paddr_to_maddr(unsi
     case PFN_TYPE_RMA:
     case PFN_TYPE_LOGICAL:
         break;
+
     case PFN_TYPE_REMOTE:
+        /* I don't think this should ever happen, but I suppose it
+         * could be possible */
         printk("%s: Dom:%d paddr: 0x%lx type: REMOTE\n",
                __func__, d->domain_id, paddr);
         WARN();
         break;
+
+    case PFN_TYPE_IO:
     default:
-        panic("%s: Dom:%d paddr: 0x%lx bad type:0x%x\n",
+        printk("%s: Dom:%d paddr: 0x%lx bad type: 0x%x\n",
                __func__, d->domain_id, paddr, mtype);
-        break;
+        WARN();
+        return 0;
     }
     pa <<= PAGE_SHIFT;
     pa |= offset;
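
With the panic() gone, paddr_to_maddr() signals an unhandled page type by
returning 0, so callers on the guest copy paths are expected to treat 0 as
failure rather than assuming the call cannot return.  A minimal standalone
sketch of that calling pattern; translate() and copy_from_guest_sketch()
are placeholders, not the Xen routines:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Placeholder for paddr_to_maddr(): returns a machine address, or 0
     * for a page type it cannot handle -- the WARN()-and-fail path. */
    static unsigned long translate(unsigned long paddr)
    {
        if (paddr >= 0x80000000UL)      /* pretend anything above 2 GiB fails */
            return 0;
        return paddr;                   /* identity mapping, sketch only */
    }

    /* Shape of a copy helper that honours the failure return instead of
     * relying on a panic inside the translation routine. */
    static int copy_from_guest_sketch(void *dst, unsigned long src_paddr,
                                      size_t len)
    {
        unsigned long ma = translate(src_paddr);

        if (ma == 0)
            return -1;                  /* propagate failure to the caller */
        memcpy(dst, (void *)(uintptr_t)ma, len);
        return 0;
    }

    int main(void)
    {
        char buf[16];

        /* Only the failing path is exercised here; the caller sees -1
         * instead of the hypervisor panicking. */
        printf("copy result: %d\n",
               copy_from_guest_sketch(buf, 0x90000000UL, sizeof buf));
        return 0;
    }
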
diff -r ccdaa3ea71a7 -r a817acb39386 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h      Tue Sep 05 15:25:06 2006 -0400
+++ b/xen/include/asm-powerpc/mm.h      Thu Sep 07 01:30:12 2006 -0400
@@ -241,7 +241,7 @@ extern int update_grant_va_mapping(unsig
 #define PFN_TYPE_IO 3
 #define PFN_TYPE_REMOTE 4
 
-extern ulong pfn2mfn(struct domain *d, long pfn, int *type);
+extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
 
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
diff -r ccdaa3ea71a7 -r a817acb39386 xen/include/asm-powerpc/processor.h
--- a/xen/include/asm-powerpc/processor.h       Tue Sep 05 15:25:06 2006 -0400
+++ b/xen/include/asm-powerpc/processor.h       Thu Sep 07 01:30:12 2006 -0400
@@ -46,6 +46,7 @@ extern uint cpu_large_page_orders(uint *
 extern uint cpu_large_page_orders(uint *sizes, uint max);
 extern void cpu_initialize(int cpuid);
 extern void cpu_init_vcpu(struct vcpu *);
+extern int cpu_io_mfn(ulong mfn);
 extern void save_cpu_sprs(struct vcpu *);
 extern void load_cpu_sprs(struct vcpu *);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog