[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: Disable set_gpfn_from_mfn until m2p table is allocated.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1307690313 -3600
# Node ID d1309a79bde846720acd4fd7f6d7cad552662c3c
# Parent  5a557fda70a99d287735b53957e865c8dac0e351
x86: Disable set_gpfn_from_mfn until m2p table is allocated.

This is a prerequisite for calling set_gpfn_from_mfn() unconditionally
from free_heap_pages().

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 5a557fda70a9 -r d1309a79bde8 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Fri Jun 10 08:08:44 2011 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Fri Jun 10 08:18:33 2011 +0100
@@ -47,6 +47,8 @@
 
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
 
+bool_t __read_mostly machine_to_phys_mapping_valid = 0;
+
 /* Top-level master (and idle-domain) page directory. */
 l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     idle_pg_table[L4_PAGETABLE_ENTRIES];
@@ -800,6 +802,8 @@
 #undef CNT
 #undef MFN
 
+    machine_to_phys_mapping_valid = 1;
+
     /* Set up linear page table mapping. */
     l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
               l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
diff -r 5a557fda70a9 -r d1309a79bde8 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Fri Jun 10 08:08:44 2011 +0100
+++ b/xen/include/asm-x86/mm.h  Fri Jun 10 08:18:33 2011 +0100
@@ -470,7 +470,7 @@
 
 #ifdef CONFIG_COMPAT
 #define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
     struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
     unsigned long entry = (d && (d == dom_cow)) ?              \
         SHARED_M2P_ENTRY : (pfn);                              \
@@ -479,7 +479,7 @@
      machine_to_phys_mapping[(mfn)] = (entry));                \
     })
 #else
-#define set_gpfn_from_mfn(mfn, pfn) ({                         \
+#define _set_gpfn_from_mfn(mfn, pfn) ({                        \
     struct domain *d = page_get_owner(__mfn_to_page(mfn));     \
     if(d && (d == dom_cow))                                    \
         machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY;     \
@@ -487,6 +487,17 @@
         machine_to_phys_mapping[(mfn)] = (pfn);                \
     })
 #endif
+
+/*
+ * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
+ * the machine_to_phys_mapping is actually set up.
+ */
+extern bool_t machine_to_phys_mapping_valid;
+#define set_gpfn_from_mfn(mfn, pfn) do {        \
+    if ( machine_to_phys_mapping_valid )        \
+        _set_gpfn_from_mfn(mfn, pfn);           \
+} while (0)
+
 #define get_gpfn_from_mfn(mfn)      (machine_to_phys_mapping[(mfn)])
 
 #define mfn_to_gmfn(_d, mfn)                            \

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.