
[Xen-devel] [PATCH v2 for-4.9 2/5] xen/arm: mm: Move create_mappings function earlier in the file



This function will be called by another function added later on. Moving
it avoids a forward declaration and keeps the new function close to its
sibling ones.

The function is moved just after the *_fixmap helpers, as they are page
table handling functions too.

Signed-off-by: Julien Grall <julien.grall@xxxxxxx>

---
    Changes in v2:
        - Patch added
---
 xen/arch/arm/mm.c | 68 +++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 34 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index f0a2eddaaf..bc65c0e432 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -269,6 +269,40 @@ void clear_fixmap(unsigned map)
     flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
+/* Create Xen's mappings of memory.
+ * Mapping_size must be either 2MB or 32MB.
+ * Base and virt must be mapping_size aligned.
+ * Size must be a multiple of mapping_size.
+ * second must be a contiguous set of second level page tables
+ * covering the region starting at virt_offset. */
+static void __init create_mappings(lpae_t *second,
+                                   unsigned long virt_offset,
+                                   unsigned long base_mfn,
+                                   unsigned long nr_mfns,
+                                   unsigned int mapping_size)
+{
+    unsigned long i, count;
+    const unsigned long granularity = mapping_size >> PAGE_SHIFT;
+    lpae_t pte, *p;
+
+    ASSERT((mapping_size == MB(2)) || (mapping_size == MB(32)));
+    ASSERT(!((virt_offset >> PAGE_SHIFT) % granularity));
+    ASSERT(!(base_mfn % granularity));
+    ASSERT(!(nr_mfns % granularity));
+
+    count = nr_mfns / LPAE_ENTRIES;
+    p = second + second_linear_offset(virt_offset);
+    pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
+    if ( granularity == 16 * LPAE_ENTRIES )
+        pte.pt.contig = 1;  /* These maps are in 16-entry contiguous chunks. */
+    for ( i = 0; i < count; i++ )
+    {
+        write_pte(p + i, pte);
+        pte.pt.base += 1 << LPAE_SHIFT;
+    }
+    flush_xen_data_tlb_local();
+}
+
 #ifdef CONFIG_DOMAIN_PAGE
 void *map_domain_page_global(mfn_t mfn)
 {
@@ -633,40 +667,6 @@ void mmu_init_secondary_cpu(void)
     flush_xen_text_tlb_local();
 }
 
-/* Create Xen's mappings of memory.
- * Mapping_size must be either 2MB or 32MB.
- * Base and virt must be mapping_size aligned.
- * Size must be a multiple of mapping_size.
- * second must be a contiguous set of second level page tables
- * covering the region starting at virt_offset. */
-static void __init create_mappings(lpae_t *second,
-                                   unsigned long virt_offset,
-                                   unsigned long base_mfn,
-                                   unsigned long nr_mfns,
-                                   unsigned int mapping_size)
-{
-    unsigned long i, count;
-    const unsigned long granularity = mapping_size >> PAGE_SHIFT;
-    lpae_t pte, *p;
-
-    ASSERT((mapping_size == MB(2)) || (mapping_size == MB(32)));
-    ASSERT(!((virt_offset >> PAGE_SHIFT) % granularity));
-    ASSERT(!(base_mfn % granularity));
-    ASSERT(!(nr_mfns % granularity));
-
-    count = nr_mfns / LPAE_ENTRIES;
-    p = second + second_linear_offset(virt_offset);
-    pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
-    if ( granularity == 16 * LPAE_ENTRIES )
-        pte.pt.contig = 1;  /* These maps are in 16-entry contiguous chunks. */
-    for ( i = 0; i < count; i++ )
-    {
-        write_pte(p + i, pte);
-        pte.pt.base += 1 << LPAE_SHIFT;
-    }
-    flush_xen_data_tlb_local();
-}
-
 #ifdef CONFIG_ARM_32
 /* Set up the xenheap: up to 1GB of contiguous, always-mapped memory. */
 void __init setup_xenheap_mappings(unsigned long base_mfn,
-- 
2.11.0
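
[Note, not part of the patch: a minimal sketch of how create_mappings()
might be invoked, in the style of the existing callers in
xen/arch/arm/mm.c. The MFN and page-count values below are made up for
illustration; xen_second and XENHEAP_VIRT_START are assumed to be the
pre-existing symbols in that file.]

    /*
     * Illustrative only: map 64MB of RAM starting at MFN 0x80000
     * (physical address 2GB) into the xenheap region, using the 32MB
     * granularity so the contiguous hint is set on the PTEs.
     *
     * Alignment check: granularity = MB(32) >> PAGE_SHIFT = 0x2000
     * entries; 0x80000 and 0x4000 are both multiples of 0x2000, and
     * XENHEAP_VIRT_START is 32MB-aligned.
     */
    create_mappings(xen_second,          /* second-level page tables  */
                    XENHEAP_VIRT_START,  /* 32MB-aligned virtual base */
                    0x80000,             /* base MFN: 0x80000 << 12 = 2GB */
                    0x4000,              /* nr of 4KB pages: 64MB total   */
                    MB(32));             /* mapping_size: 2MB or 32MB     */

With mapping_size == MB(32), granularity equals 16 * LPAE_ENTRIES, so
pte.pt.contig is set and the region is mapped in 16-entry contiguous
chunks; a 2MB mapping_size would skip the hint.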

