
[Xen-devel] [PATCH v3] boot allocator: Use arch helper for virt_to_mfn on DIRECTMAP



From: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

On ARM64, virt_to_mfn() uses the hardware for address translation, so a
translation fault is raised if the virtual address is not mapped. On ARM64,
the DIRECTMAP_VIRT region is direct mapped.

On ARM platforms with NUMA, a panic is triggered from init_node_heap()
while initializing the second memory node, because virt_to_mfn() is called
on a DIRECTMAP_VIRT region address. There, a check ensures that the MFN is
below the maximum mapped MFN, and that maximum is found by calling
virt_to_mfn() on DIRECTMAP_VIRT_END. Since that address is not mapped on
ARM, the translation fails.
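
For illustration, the pre-patch check condensed into a hypothetical helper
(range_fits_directmap_old() is a made-up name; the real code is in the first
page_alloc.c hunk below):

    static bool_t range_fits_directmap_old(unsigned long mfn, unsigned long nr)
    {
        unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);

        /*
         * virt_to_mfn() uses the hardware translation on ARM and faults
         * here, because eva - 1 is not a mapped address.
         */
        return (mfn + nr) <= (virt_to_mfn(eva - 1) + 1);
    }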

With this patch, the virt_to_mfn() call is replaced by a new arch helper,
arch_mfn_below_directmap_max_mfn(). On ARM64 the helper always returns true,
since the DIRECTMAP_VIRT region is directly mapped. On ARM32 only the
xenheap region is directly mapped, not the DIRECTMAP_VIRT region, so the
helper always returns false. On x86 the helper continues to compute the
limit with virt_to_mfn().
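
As a usage sketch (not part of the patch; try_directmap_virt() is a made-up
caller name), common code is expected to check the helper before converting
an MFN range to a direct-map virtual address:

    static void *try_directmap_virt(unsigned long mfn, unsigned long nr)
    {
        if ( !arch_mfn_below_directmap_max_mfn(mfn + nr) )
            return NULL;          /* range not covered by the direct map */

        /* The range lies within the direct map, so mfn_to_virt() is safe. */
        return mfn_to_virt(mfn);
    }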

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@xxxxxxxxxx>

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 42c20cb..85322cd 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -520,9 +520,6 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
     unsigned long needed = (sizeof(**_heap) +
                             sizeof(**avail) * NR_ZONES +
                             PAGE_SIZE - 1) >> PAGE_SHIFT;
-#ifdef DIRECTMAP_VIRT_END
-    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
-#endif
     int i, j;
 
     if ( !first_node_initialised )
@@ -534,7 +531,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
     }
 #ifdef DIRECTMAP_VIRT_END
     else if ( *use_tail && nr >= needed &&
-              (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) &&
+              arch_mfn_below_directmap_max_mfn(mfn + nr) &&
               (!xenheap_bits ||
                !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
@@ -543,7 +540,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) &&
+              arch_mfn_below_directmap_max_mfn(mfn + needed) &&
               (!xenheap_bits ||
                !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
diff --git a/xen/include/asm-arm/arm32/mm.h b/xen/include/asm-arm/arm32/mm.h
new file mode 100644
index 0000000..85b2388
--- /dev/null
+++ b/xen/include/asm-arm/arm32/mm.h
@@ -0,0 +1,20 @@
+#ifndef __ARM_ARM32_MM_H__
+#define __ARM_ARM32_MM_H__
+
+/* On ARM32, only the xenheap region is directly mapped, hence return false. */
+static inline bool_t arch_mfn_below_directmap_max_mfn(unsigned long mfn)
+{
+    return false;
+}
+
+#endif /* __ARM_ARM32_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/mm.h b/xen/include/asm-arm/arm64/mm.h
new file mode 100644
index 0000000..98c6fc7
--- /dev/null
+++ b/xen/include/asm-arm/arm64/mm.h
@@ -0,0 +1,20 @@
+#ifndef __ARM_ARM64_MM_H__
+#define __ARM_ARM64_MM_H__
+
+/* On ARM64, the DIRECTMAP_VIRT region is directly mapped, hence return true. */
+static inline bool_t arch_mfn_below_directmap_max_mfn(unsigned long mfn)
+{
+    return true;
+}
+
+#endif /* __ARM_ARM64_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 4892155..5a802cc 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -6,6 +6,14 @@
 #include <public/xen.h>
 #include <xen/pdx.h>
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/mm.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/mm.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
 
@@ -254,7 +262,6 @@ static inline int gvirt_to_maddr(vaddr_t va, paddr_t *pa, unsigned int flags)
 #define virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
 #define mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
 
-
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
 {
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 46faffc..e0c31b6 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -18,6 +18,7 @@
 #ifndef __ASSEMBLY__
 # include <asm/types.h>
 # include <xen/lib.h>
+# include <xen/kernel.h>
 #endif
 
 #include <asm/x86_64/page.h>
@@ -374,6 +375,17 @@ perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
     return ((of | (of ^ nf)) == nf);
 }
 
+/*
+ * x86 maps the DIRECTMAP_VIRT region to physical memory. Check whether
+ * the given MFN falls below the maximum MFN covered by the direct map.
+ */
+static inline bool_t arch_mfn_below_directmap_max_mfn(unsigned long mfn)
+{
+    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
+
+    return mfn <= (virt_to_mfn(eva - 1) + 1);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
-- 
2.7.4



 

