[Xen-changelog] [xen-unstable] x86: Fix xen_in_range() for fragmented percpu data area.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1263463424 0
# Node ID 50bd4235f4864683a1ab691999bb399db96b5b3d
# Parent  aaf34d74b62228555c7542568781319b6c477d12
x86: Fix xen_in_range() for fragmented percpu data area.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/setup.c |   36 ++++++++++++++++++++----------------
 1 files changed, 20 insertions(+), 16 deletions(-)
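
The first hunk below corrects the expression used to round the per-cpu
data size up to a page boundary: adding PAGE_SIZE + 1 over-rounds an
already aligned size by a whole extra page, whereas the usual
PAGE_SIZE - 1 idiom does not.  A minimal standalone sketch of the
difference (PAGE_SIZE and PAGE_MASK here are the common 4KiB definitions,
assumed for illustration rather than taken from Xen's headers):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Old expression: adds one byte too many, so an already page-aligned
 * size is rounded up by a whole extra page. */
static unsigned long round_up_old(unsigned long size)
{
    return (size + PAGE_SIZE + 1) & PAGE_MASK;
}

/* Fixed expression: the standard round-up-to-page idiom. */
static unsigned long round_up_new(unsigned long size)
{
    return (size + PAGE_SIZE - 1) & PAGE_MASK;
}

int main(void)
{
    printf("%lu %lu\n", round_up_old(PAGE_SIZE), round_up_new(PAGE_SIZE));
    /* Prints "8192 4096": the old form wastes one page per CPU. */
    return 0;
}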

diff -r aaf34d74b622 -r 50bd4235f486 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Thu Jan 14 09:44:08 2010 +0000
+++ b/xen/arch/x86/setup.c      Thu Jan 14 10:03:44 2010 +0000
@@ -230,7 +230,7 @@ static void __init percpu_free_unused_ar
     /* Free all unused per-cpu data areas. */
     free_xen_data(&__per_cpu_start[first_unused << PERCPU_SHIFT], __bss_start);
 
-    data_size = (data_size + PAGE_SIZE + 1) & PAGE_MASK;
+    data_size = (data_size + PAGE_SIZE - 1) & PAGE_MASK;
     if ( data_size != PERCPU_SIZE )
         for ( i = 0; i < first_unused; i++ )
             free_xen_data(&__per_cpu_start[(i << PERCPU_SHIFT) + data_size],
@@ -1198,33 +1198,37 @@ int xen_in_range(paddr_t start, paddr_t 
 int xen_in_range(paddr_t start, paddr_t end)
 {
     int i;
+
+    enum { region_s3, region_text, region_percpu, region_bss, nr_regions };
     static struct {
         paddr_t s, e;
-    } xen_regions[4];
+    } xen_regions[nr_regions];
+    static unsigned int percpu_data_size;
 
     /* initialize first time */
     if ( !xen_regions[0].s )
     {
         /* S3 resume code (and other real mode trampoline code) */
-        xen_regions[0].s = bootsym_phys(trampoline_start);
-        xen_regions[0].e = bootsym_phys(trampoline_end);
+        xen_regions[region_s3].s = bootsym_phys(trampoline_start);
+        xen_regions[region_s3].e = bootsym_phys(trampoline_end);
         /* hypervisor code + data */
-        xen_regions[1].s =__pa(&_stext);
-        xen_regions[1].e = __pa(&__init_begin);
+        xen_regions[region_text].s = __pa(&_stext);
+        xen_regions[region_text].e = __pa(&__init_begin);
         /* per-cpu data */
-        xen_regions[2].s = __pa(&__per_cpu_start);
-        xen_regions[2].e = xen_regions[2].s +
+        xen_regions[region_percpu].s = __pa(&__per_cpu_start);
+        xen_regions[region_percpu].e = xen_regions[region_percpu].s +
             (((paddr_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
+        percpu_data_size = __per_cpu_data_end - __per_cpu_start;
+        percpu_data_size = (percpu_data_size + PAGE_SIZE - 1) & PAGE_MASK;
         /* bss */
-        xen_regions[3].s = __pa(&__bss_start);
-        xen_regions[3].e = __pa(&_end);
-    }
-
-    for ( i = 0; i < ARRAY_SIZE(xen_regions); i++ )
-    {
+        xen_regions[region_bss].s = __pa(&__bss_start);
+        xen_regions[region_bss].e = __pa(&_end);
+    }
+
+    for ( i = 0; i < nr_regions; i++ )
         if ( (start < xen_regions[i].e) && (end > xen_regions[i].s) )
-            return 1;
-    }
+            return ((i != region_percpu) ||
+                    ((start & (PERCPU_SIZE - 1)) < percpu_data_size));
 
     return 0;
 }
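
The second hunk makes xen_in_range() aware that the per-cpu region is no
longer contiguous: percpu_free_unused_areas() keeps only the first
page-rounded data_size bytes of each possible CPU's PERCPU_SIZE-aligned
chunk and frees the rest, so an address landing in the freed tail of a
chunk must not be reported as hypervisor memory.  A standalone sketch of
that test (the helper name, the PERCPU_SHIFT value and the parameters are
assumptions for illustration; only the masking check mirrors the patch):

#include <stdbool.h>
#include <stdint.h>

/* Size of one per-cpu chunk; the shift is an illustrative value, not
 * necessarily Xen's PERCPU_SHIFT. */
#define PERCPU_SHIFT 13
#define PERCPU_SIZE  (1UL << PERCPU_SHIFT)

/*
 * A range [start, end) only hits live per-cpu data if it overlaps the
 * per-cpu region [region_s, region_e) *and* its start lies within the
 * retained head (percpu_data_size bytes) of a chunk.
 */
static bool overlaps_live_percpu(uint64_t start, uint64_t end,
                                 uint64_t region_s, uint64_t region_e,
                                 unsigned int percpu_data_size)
{
    if ( start >= region_e || end <= region_s )
        return false;                      /* misses the region entirely */

    return (start & (PERCPU_SIZE - 1)) < percpu_data_size;
}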

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

