
[Xen-changelog] [xen master] xen: arm64: Handle memory banks which are not 1GB aligned



commit 4d57153b52a36183d58e8de6ba613929f906386a
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Fri Oct 10 15:43:14 2014 +0100
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Tue Oct 14 11:59:00 2014 +0100

    xen: arm64: Handle memory banks which are not 1GB aligned
    
    The code in the arm64 version of setup_xenheap_mappings made some
    very confused attempts to handle such banks, but the logic was
    bogus.
    
    As well as adjusting the mapping to start on a 1GB boundary, we also
    need to account for the offset between the start of the mapping and
    the actual start of the heap when converting between page pointers,
    virtual addresses and machine addresses.
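    
    A minimal sketch of that offset arithmetic (simplified from the
    patch below; an illustration, not the literal code):
    
        /* Round the start of the mapping down to a 1GB boundary... */
        unsigned long aligned_mfn = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
    
        /* ...and record where the heap itself begins within that
         * mapping, so later conversions can use the true heap start. */
        vaddr_t heap_virt_start = DIRECTMAP_VIRT_START +
                                  (base_mfn - aligned_mfn) * PAGE_SIZE;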
    
    I preferred to do this by explicitly accounting for the offset
    rather than by adding an offset to the frametable, both because that
    approach could waste a large amount of frametable (up to just under
    1GB worth) and because of issues with converting mfns from outside
    the regions considered for pdx initialisation (which are not 1GB
    aligned) back and forth.
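    
    To put a rough figure on that waste (assuming 4KB pages and an
    illustrative 56-byte struct page_info; the exact size varies):
    
        /* Up to just under 1GB of extra pages would need frametable
         * entries if the frametable were simply offset instead. */
        unsigned long extra_pages = (1UL << 30) >> 12; /* 262144 pages */
        unsigned long extra_bytes = extra_pages * 56;  /* ~14 MiB */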
    
    We already have an idea of the distinction between the start of the
    direct map and the start of the xenheap in the difference between
    DIRECTMAP_VIRT_START and XENHEAP_VIRT_START. Until now these were
    the same thing, but now we change XENHEAP_VIRT_START to point to the
    actual start of the heap, not the start of the mapping.
    Surprisingly, only one place was using the conceptually wrong value.
    
    Also change xenheap_virt_end to a vaddr_t for consistency.
    
    We've been lucky so far that most hardware happens to locate memory
    on a 1GB boundary (we did have reports of a system with memory at a
    half-gigabyte boundary which exhibited failures that I didn't manage
    to follow up on successfully). The EFI support has exposed this
    shortcoming by the way it handles reserved memory, which has a
    similar effect to having memory that is not 1GB aligned.
    
    arm32 does things differently here, using a small Xen heap and a
    demand-mapped domain heap, so it isn't affected.
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
    Cc: Roy Franz <roy.franz@xxxxxxxxxx>
    Cc: Vijay Kilari <vijay.kilari@xxxxxxxxx>
    Reviewed-by: Julien Grall <julien.grall@xxxxxxxxxx>
    Tested-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
    Tested-by: Roy Franz <roy.franz@xxxxxxxxxx>
---
 xen/arch/arm/mm.c            |   36 +++++++++++++++++++++++++-----------
 xen/include/asm-arm/config.h |    2 +-
 xen/include/asm-arm/mm.h     |    7 +++++--
 3 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index c5b48ef..97e5bc3 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -138,7 +138,10 @@ static paddr_t phys_offset;
 /* Limits of the Xen heap */
 unsigned long xenheap_mfn_start __read_mostly = ~0UL;
 unsigned long xenheap_mfn_end __read_mostly;
-unsigned long xenheap_virt_end __read_mostly;
+vaddr_t xenheap_virt_end __read_mostly;
+#ifdef CONFIG_ARM_64
+vaddr_t xenheap_virt_start __read_mostly;
+#endif
 
 unsigned long frametable_base_pdx __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
@@ -155,7 +158,11 @@ static inline void check_memory_layout_alignment_constraints(void) {
     BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
     BUILD_BUG_ON(BOOT_RELOC_VIRT_START & ~SECOND_MASK);
     /* 1GB aligned regions */
+#ifdef CONFIG_ARM_32
     BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
+#else
+    BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK);
+#endif
     /* Page table structure constraints */
 #ifdef CONFIG_ARM_64
     BUILD_BUG_ON(zeroeth_table_offset(XEN_VIRT_START));
@@ -665,12 +672,19 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
                                    unsigned long nr_mfns)
 {
     lpae_t *first, pte;
-    unsigned long offset, end_mfn;
+    unsigned long mfn, end_mfn;
     vaddr_t vaddr;
 
-    /* First call sets the xenheap physical offset. */
+    /* Align to previous 1GB boundary */
+    mfn = base_mfn & ~((FIRST_SIZE>>PAGE_SHIFT)-1);
+
+    /* First call sets the xenheap physical and virtual offset. */
     if ( xenheap_mfn_start == ~0UL )
+    {
         xenheap_mfn_start = base_mfn;
+        xenheap_virt_start = DIRECTMAP_VIRT_START +
+            (base_mfn - mfn) * PAGE_SIZE;
+    }
 
     if ( base_mfn < xenheap_mfn_start )
         panic("cannot add xenheap mapping at %lx below heap start %lx",
@@ -678,13 +692,13 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
 
     end_mfn = base_mfn + nr_mfns;
 
-    /* Align to previous 1GB boundary */
-    base_mfn &= ~((FIRST_SIZE>>PAGE_SHIFT)-1);
-
-    offset = pfn_to_pdx(base_mfn - xenheap_mfn_start);
-    vaddr = DIRECTMAP_VIRT_START + offset*PAGE_SIZE;
+    /*
+     * Virtual address aligned to previous 1GB to match physical
+     * address alignment done above.
+     */
+    vaddr = (vaddr_t)mfn_to_virt(base_mfn) & FIRST_MASK;
 
-    while ( base_mfn < end_mfn )
+    while ( mfn < end_mfn )
     {
         int slot = zeroeth_table_offset(vaddr);
         lpae_t *p = &xen_pgtable[slot];
@@ -716,11 +730,11 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
             first = mfn_to_virt(first_mfn);
         }
 
-        pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
+        pte = mfn_to_xen_entry(mfn, WRITEALLOC);
         /* TODO: Set pte.pt.contig when appropriate. */
         write_pte(&first[first_table_offset(vaddr)], pte);
 
-        base_mfn += FIRST_SIZE>>PAGE_SHIFT;
+        mfn += FIRST_SIZE>>PAGE_SHIFT;
         vaddr += FIRST_SIZE;
     }
 
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 59b2887..264e2c1 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -162,7 +162,7 @@
 #define DIRECTMAP_SIZE         (SLOT0_ENTRY_SIZE * (265-256))
 #define DIRECTMAP_VIRT_END     (DIRECTMAP_VIRT_START + DIRECTMAP_SIZE - 1)
 
-#define XENHEAP_VIRT_START     DIRECTMAP_VIRT_START
+#define XENHEAP_VIRT_START     xenheap_virt_start
 
 #define HYPERVISOR_VIRT_END    DIRECTMAP_VIRT_END
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 1e4711c..d25e485 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -110,7 +110,10 @@ struct page_info
 #define PGC_count_mask    ((1UL<<PGC_count_width)-1)
 
 extern unsigned long xenheap_mfn_start, xenheap_mfn_end;
-extern unsigned long xenheap_virt_end;
+extern vaddr_t xenheap_virt_end;
+#ifdef CONFIG_ARM_64
+extern vaddr_t xenheap_virt_start;
+#endif
 
 #ifdef CONFIG_ARM_32
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
@@ -227,7 +230,7 @@ static inline void *maddr_to_virt(paddr_t ma)
 static inline void *maddr_to_virt(paddr_t ma)
 {
     ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
-    return (void *)(DIRECTMAP_VIRT_START -
+    return (void *)(XENHEAP_VIRT_START -
                     pfn_to_paddr(xenheap_mfn_start) +
                     ((ma & ma_va_bottom_mask) |
                      ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
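
A worked example of the new logic, under assumed values (a memory bank
at a half-gigabyte physical boundary, 4KB pages, FIRST_SIZE = 1GB):

    /* Hypothetical numbers, for illustration only. */
    unsigned long base_mfn = 0x60000;  /* bank starts at pa 0x60000000 */
    unsigned long mfn = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
                                       /* = 0x40000, i.e. pa 0x40000000 */
    /*
     * xenheap_virt_start = DIRECTMAP_VIRT_START
     *                    + (0x60000 - 0x40000) * 4096
     *                    = DIRECTMAP_VIRT_START + 0x20000000,
     * so maddr_to_virt() translates relative to the true heap start
     * (XENHEAP_VIRT_START) rather than the 1GB-aligned mapping.
     */
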
--
generated by git-patchbot for /home/xen/git/xen.git#master
