
[Xen-changelog] [xen-unstable] x86: frame table related improvements


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Wed, 12 Dec 2012 00:55:09 +0000
  • Delivery-date: Wed, 12 Dec 2012 00:55:24 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1355230073 -3600
# Node ID 1a329da132f46ec7753a68a67ad85f6cb18ea2cc
# Parent  03cb71bc32f95b4029843e0254b4448e0c6a817a
x86: frame table related improvements

- fix super page frame table setup for memory hotplug case (should
  create full table, or else the hotplug code would need to do the
  necessary table population)
- simplify super page frame table setup (can re-use frame table setup
  code)
- slightly streamline frame table setup code
- fix (tighten) a BUG_ON() and an ASSERT() condition
- fix spage <-> pdx conversion macros (they had no users so far, and
  hence no-one noticed how broken they were; an illustrative sketch
  follows after the "---" separator below)

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
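
For illustration only, not part of the changeset: a minimal standalone
sketch of the macro fix, using the real x86 shift values (2M superpages
of 4k pages, so SUPERPAGE_SHIFT - PAGE_SHIFT == 9 and each spage_table
entry covers 512 pdx values) but a hypothetical toy spage_table and a
stand-in entry type. The old spage_to_pdx() could not even be expanded,
since C forbids shifting a pointer; the old pdx_to_spage() converted in
the wrong direction.

#include <stdio.h>

#define SUPERPAGE_SHIFT 21
#define PAGE_SHIFT      12

struct spage_info { unsigned long type_info; };   /* stand-in entry type */
static struct spage_info spage_table[8];          /* toy table */

/* Old macros (broken; never expanded anywhere in-tree): */
#define old_spage_to_pdx(spg) ((spg>>(SUPERPAGE_SHIFT-PAGE_SHIFT)) - spage_table)
#define old_pdx_to_spage(pdx) (spage_table + ((pdx)<<(SUPERPAGE_SHIFT-PAGE_SHIFT)))

/* Fixed macros, as introduced by this patch: */
#define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
#define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))

int main(void)
{
    unsigned long pdx = 1024;           /* 4k page 1024 lies in 2M page 2 */
    struct spage_info *spg = pdx_to_spage(pdx);

    printf("entry index: %td\n", spg - spage_table);                /* 2 */
    printf("back to pdx: %lu\n", (unsigned long)spage_to_pdx(spg)); /* 1024 */

    /* old_pdx_to_spage(1024) would have indexed entry 524288, far beyond
     * any plausible table; old_spage_to_pdx(spg) does not compile at all,
     * as it right-shifts the pointer before subtracting the table base. */
    return 0;
}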


diff -r 03cb71bc32f9 -r 1a329da132f4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Dec 10 11:16:17 2012 +0000
+++ b/xen/arch/x86/mm.c Tue Dec 11 13:47:53 2012 +0100
@@ -182,28 +182,6 @@ static uint32_t base_disallow_mask;
       !is_hvm_domain(d)) ?                                      \
      L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
 
-static void __init init_spagetable(void)
-{
-    unsigned long s, start = SPAGETABLE_VIRT_START;
-    unsigned long end = SPAGETABLE_VIRT_END;
-    unsigned long step, mfn;
-    unsigned int max_entries;
-
-    step = 1UL << PAGETABLE_ORDER;
-    max_entries = (max_pdx + ((1UL<<SUPERPAGE_ORDER)-1)) >> SUPERPAGE_ORDER;
-    end = start + (((max_entries * sizeof(*spage_table)) +
-                    ((1UL<<SUPERPAGE_SHIFT)-1)) & (~((1UL<<SUPERPAGE_SHIFT)-1)));
-
-    for (s = start; s < end; s += step << PAGE_SHIFT)
-    {
-        mfn = alloc_boot_pages(step, step);
-        if ( !mfn )
-            panic("Not enough memory for spage table");
-        map_pages_to_xen(s, mfn, step, PAGE_HYPERVISOR);
-    }
-    memset((void *)start, 0, end - start);
-}
-
 static void __init init_frametable_chunk(void *start, void *end)
 {
     unsigned long s = (unsigned long)start;
@@ -232,15 +210,25 @@ static void __init init_frametable_chunk
     }
 
     memset(start, 0, end - start);
-    memset(end, -1, s - (unsigned long)end);
+    memset(end, -1, s - e);
+}
+
+static void __init init_spagetable(void)
+{
+    BUILD_BUG_ON(XEN_VIRT_END > SPAGETABLE_VIRT_START);
+
+    init_frametable_chunk(spage_table,
+                          mem_hotplug ? (void *)SPAGETABLE_VIRT_END
+                                      : pdx_to_spage(max_pdx - 1) + 1);
 }
 
 void __init init_frametable(void)
 {
     unsigned int sidx, eidx, nidx;
     unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
-
-    BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_END);
+    struct page_info *end_pg, *top_pg;
+
+    BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_START);
     BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1));
 
     for ( sidx = 0; ; sidx = nidx )
@@ -252,17 +240,13 @@ void __init init_frametable(void)
         init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
                               pdx_to_page(eidx * PDX_GROUP_COUNT));
     }
-    if ( !mem_hotplug )
-        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                              pdx_to_page(max_pdx - 1) + 1);
-    else
-    {
-        init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
-                              pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1);
-        memset(pdx_to_page(max_pdx), -1,
-               (unsigned long)pdx_to_page(max_idx * PDX_GROUP_COUNT) -
-               (unsigned long)pdx_to_page(max_pdx));
-    }
+
+    end_pg = pdx_to_page(max_pdx - 1) + 1;
+    top_pg = mem_hotplug ? pdx_to_page(max_idx * PDX_GROUP_COUNT - 1) + 1
+                         : end_pg;
+    init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT), top_pg);
+    memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
+
     if (opt_allow_superpage)
         init_spagetable();
 }
diff -r 03cb71bc32f9 -r 1a329da132f4 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Mon Dec 10 11:16:17 2012 +0000
+++ b/xen/include/asm-x86/mm.h  Tue Dec 11 13:47:53 2012 +0100
@@ -301,7 +301,7 @@ static inline struct page_info *__virt_t
 
 static inline void *__page_to_virt(const struct page_info *pg)
 {
-    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_VIRT_END);
+    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE);
     /*
      * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The
      * division and re-multiplication avoids one shift when sizeof(*pg) is a
diff -r 03cb71bc32f9 -r 1a329da132f4 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Mon Dec 10 11:16:17 2012 +0000
+++ b/xen/include/asm-x86/x86_64/page.h Tue Dec 11 13:47:53 2012 +0100
@@ -46,8 +46,8 @@ extern void pfn_pdx_hole_setup(unsigned 
 
 #define page_to_pdx(pg)  ((pg) - frame_table)
 #define pdx_to_page(pdx) (frame_table + (pdx))
-#define spage_to_pdx(spg) ((spg>>(SUPERPAGE_SHIFT-PAGE_SHIFT)) - spage_table)
-#define pdx_to_spage(pdx) (spage_table + ((pdx)<<(SUPERPAGE_SHIFT-PAGE_SHIFT)))
+#define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
+#define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))
 /*
  * Note: These are solely for the use by page_{get,set}_owner(), and
  *       therefore don't need to handle the XEN_VIRT_{START,END} range.

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

