[Xen-devel] [PATCH v2 1/8] xen/arm: introduce early_ioremap



Introduce a function to map a range of physical memory into Xen virtual
memory.
It does not require the domheap to be set up, so it can be used early
at boot.
It is going to be used to map the videoram.
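For illustration, a caller could use it along these lines (a minimal
sketch; the physical address, the size and the choice of the DEV_SHARED
attribute index from asm-arm/page.h are assumptions for the example,
not taken from this series):

    /* Hypothetical: map 8MB of videoram starting at a 2MB-aligned
     * physical address, using the device-memory attribute index. */
    paddr_t vram_start = 0x4c000000;   /* assumed 2MB aligned */
    size_t vram_len = 8 << 20;         /* a multiple of 2MB */
    void *vram = early_ioremap(vram_start, vram_len, DEV_SHARED);

    if ( vram == NULL )
        panic("Unable to map the videoram\n");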

Also add flush_xen_data_tlb_range, which flushes a range of virtual
addresses from the data TLB.
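For example, a hypothetical caller that has just rewritten the PTEs
backing nr pages at virtual address addr (both names illustrative)
would do:

    /* Discard stale hypervisor data-TLB entries for the remapped pages. */
    flush_xen_data_tlb_range((unsigned long) addr, nr * PAGE_SIZE);

early_ioremap() below uses it to make its new mappings visible.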

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/mm.c            |   32 ++++++++++++++++++++++++++++++++
 xen/include/asm-arm/config.h |    2 ++
 xen/include/asm-arm/mm.h     |    3 ++-
 xen/include/asm-arm/page.h   |   23 +++++++++++++++++++++++
 4 files changed, 59 insertions(+), 1 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 855f83d..0d7a163 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -367,6 +367,38 @@ void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pages * sizeof(struct page_info));
 }
 
+/* Map the physical memory range [start, start + len) into virtual
+ * memory and return the virtual address of the mapping.
+ * start must be 2MB aligned.
+ * len must be less than EARLY_VMAP_END - EARLY_VMAP_START.
+ */
+void* early_ioremap(paddr_t start, size_t len, unsigned attributes)
+{
+    static unsigned long virt_start = EARLY_VMAP_START;
+    void* ret_addr = (void *)virt_start;
+    paddr_t end = start + len;
+
+    ASSERT(!(start & (~SECOND_MASK)));
+    ASSERT(!(virt_start & (~SECOND_MASK)));
+
+    /* The range we need to map is too big */
+    if ( virt_start + len >= EARLY_VMAP_END )
+        return NULL;
+
+    while ( start < end )
+    {
+        lpae_t e = mfn_to_xen_entry(start >> PAGE_SHIFT);
+        e.pt.ai = attributes;
+        write_pte(xen_second + second_table_offset(virt_start), e);
+
+        start += SECOND_SIZE;
+        virt_start += SECOND_SIZE;
+    }
+    flush_xen_data_tlb_range((unsigned long) ret_addr, len);
+
+    return ret_addr;
+}
+
 enum mg { mg_clear, mg_ro, mg_rw, mg_rx };
 static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
 {
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 2a05539..87db0d1 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -73,9 +73,11 @@
 #define FIXMAP_ADDR(n)        (mk_unsigned_long(0x00400000) + (n) * PAGE_SIZE)
 #define BOOT_MISC_VIRT_START   mk_unsigned_long(0x00600000)
 #define FRAMETABLE_VIRT_START  mk_unsigned_long(0x02000000)
+#define EARLY_VMAP_START       mk_unsigned_long(0x10000000)
 #define XENHEAP_VIRT_START     mk_unsigned_long(0x40000000)
 #define DOMHEAP_VIRT_START     mk_unsigned_long(0x80000000)
 
+#define EARLY_VMAP_END         XENHEAP_VIRT_START
 #define HYPERVISOR_VIRT_START  XEN_VIRT_START
 
 #define DOMHEAP_ENTRIES        1024  /* 1024 2MB mapping slots */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index e95ece1..4ed5df6 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -150,7 +150,8 @@ extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
 extern void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes);
 /* Remove a mapping from a fixmap entry */
 extern void clear_fixmap(unsigned map);
-
+/* Map a 2MB-aligned physical range into virtual memory. */
+void* early_ioremap(paddr_t start, size_t len, unsigned attributes);
 
 #define mfn_valid(mfn)        ({                                              \
     unsigned long __m_f_n = (mfn);                                            \
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index d89261e..0790dda 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -328,6 +328,23 @@ static inline void flush_xen_data_tlb_va(unsigned long va)
                  : : "r" (va) : "memory");
 }
 
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB. This is not
+ * sufficient when changing code mappings or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range(unsigned long va, unsigned long size)
+{
+    unsigned long end = va + size;
+    while ( va < end ) {
+        asm volatile("dsb;" /* Ensure preceding are visible */
+                STORE_CP32(0, TLBIMVAH)
+                "dsb;" /* Ensure completion of the TLB flush */
+                "isb;"
+                : : "r" (va) : "memory");
+        va += PAGE_SIZE;
+    }
+}
+
 /* Flush all non-hypervisor mappings from the TLB */
 static inline void flush_guest_tlb(void)
 {
@@ -418,8 +435,14 @@ static inline uint64_t gva_to_ipa(uint32_t va)
 #define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)
 
 #define THIRD_SHIFT  PAGE_SHIFT
+#define THIRD_SIZE   (1u << THIRD_SHIFT)
+#define THIRD_MASK   (~(THIRD_SIZE - 1))
 #define SECOND_SHIFT (THIRD_SHIFT + LPAE_SHIFT)
+#define SECOND_SIZE  (1u << SECOND_SHIFT)
+#define SECOND_MASK  (~(SECOND_SIZE - 1))
 #define FIRST_SHIFT  (SECOND_SHIFT + LPAE_SHIFT)
+#define FIRST_SIZE   (1u << FIRST_SHIFT)
+#define FIRST_MASK   (~(FIRST_SIZE - 1))
 
 /* Calculate the offsets into the pagetables for a given VA */
 #define first_linear_offset(va) (va >> FIRST_SHIFT)
-- 
1.7.2.5

