
[Xen-devel] [PATCH 02/17] x86/mm: make mm.c build with !CONFIG_PV



Put PV-only code under CONFIG_PV. Most of the PV pagetable handling (the
get/put_page_from_lNe helpers, alloc/free_l4_table, the mod_lN_entry
functions, do_mmu_update and the update_va_mapping hypercalls) is compiled
out entirely, while alloc_page_type, free_page_type and new_guest_cr3,
which are still referenced from common code, keep their prototypes and
gain ASSERT_UNREACHABLE() stubs returning -EINVAL.
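
As a rough illustration of the stub shape applied below (the function name
and body here are made up for the example; the real hunks apply this
pattern to alloc_page_type(), free_page_type() and new_guest_cr3()):

    int example_pv_entry_point(struct page_info *page, unsigned long type)
    {
    #ifdef CONFIG_PV
        /* PV builds keep the existing implementation here. */
        return 0;
    #else
        /*
         * Common code still links against this symbol, but with PV support
         * compiled out nothing should ever reach it.
         */
        ASSERT_UNREACHABLE();
        return -EINVAL;
    #endif
    }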

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/mm.c | 55 ++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 46 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 02abd06..f211383 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -625,6 +625,7 @@ const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
     zero_page[PAGE_SIZE];
 
 
+#ifdef CONFIG_PV
 static int alloc_segdesc_page(struct page_info *page)
 {
     const struct domain *owner = page_get_owner(page);
@@ -639,6 +640,7 @@ static int alloc_segdesc_page(struct page_info *page)
 
     return i == 512 ? 0 : -EINVAL;
 }
+#endif
 
 static int _get_page_type(struct page_info *page, unsigned long type,
                           bool preemptible);
@@ -663,15 +665,6 @@ static int get_page_and_type_from_mfn(
     return rc;
 }
 
-static void put_data_page(
-    struct page_info *page, int writeable)
-{
-    if ( writeable )
-        put_page_and_type(page);
-    else
-        put_page(page);
-}
-
 #ifdef CONFIG_PV_LINEAR_PT
 
 static bool inc_linear_entries(struct page_info *pg)
@@ -1129,6 +1122,8 @@ get_page_from_l1e(
     return -EBUSY;
 }
 
+#ifdef CONFIG_PV
+
 define_get_linear_pagetable(l2);
 static int
 get_page_from_l2e(
@@ -1197,6 +1192,8 @@ get_page_from_l4e(
     return rc;
 }
 
+#endif /* CONFIG_PV */
+
 static int _put_page_type(struct page_info *page, bool preemptible,
                           struct page_info *ptpg);
 
@@ -1277,6 +1274,8 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 }
 
 
+#ifdef CONFIG_PV
+
 /*
  * NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'.
  * Note also that this automatically deals correctly with linear p.t.'s.
@@ -1306,6 +1305,15 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
     return 0;
 }
 
+static void put_data_page(
+    struct page_info *page, int writeable)
+{
+    if ( writeable )
+        put_page_and_type(page);
+    else
+        put_page(page);
+}
+
 static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
                              int partial, bool defer)
 {
@@ -1620,6 +1628,8 @@ void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d)
            COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2t));
 }
 
+#endif /* CONFIG_PV */
+
 /*
  * Fill an L4 with Xen entries.
  *
@@ -1726,6 +1736,7 @@ void zap_ro_mpt(mfn_t mfn)
     unmap_domain_page(l4tab);
 }
 
+#ifdef CONFIG_PV
 static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
@@ -1916,6 +1927,7 @@ static int free_l4_table(struct page_info *page)
 
     return rc;
 }
+#endif
 
 #ifndef NDEBUG
 /*
@@ -2000,6 +2012,7 @@ void page_unlock(struct page_info *page)
     current_locked_page_set(NULL);
 }
 
+#ifdef CONFIG_PV
 /*
  * PTE flags that a guest may change without re-validating the PTE.
  * All other bits affect translation, caching, or Xen's safety.
@@ -2311,6 +2324,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
     put_page_from_l4e(ol4e, pfn, 0, 1);
     return rc;
 }
+#endif /* CONFIG_PV */
 
 static int cleanup_page_cacheattr(struct page_info *page)
 {
@@ -2418,6 +2432,7 @@ static void get_page_light(struct page_info *page)
 static int alloc_page_type(struct page_info *page, unsigned long type,
                            int preemptible)
 {
+#ifdef CONFIG_PV
     struct domain *owner = page_get_owner(page);
     int rc;
 
@@ -2487,12 +2502,17 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
     }
 
     return rc;
+#else
+    ASSERT_UNREACHABLE();
+    return -EINVAL;
+#endif
 }
 
 
 int free_page_type(struct page_info *page, unsigned long type,
                    int preemptible)
 {
+#ifdef CONFIG_PV
     struct domain *owner = page_get_owner(page);
     unsigned long gmfn;
     int rc;
@@ -2541,6 +2561,10 @@ int free_page_type(struct page_info *page, unsigned long type,
     }
 
     return rc;
+#else
+    ASSERT_UNREACHABLE();
+    return -EINVAL;
+#endif
 }
 
 
@@ -2931,6 +2955,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
 
 int new_guest_cr3(mfn_t mfn)
 {
+#ifdef CONFIG_PV
     struct vcpu *curr = current;
     struct domain *d = curr->domain;
     int rc;
@@ -3029,6 +3054,10 @@ int new_guest_cr3(mfn_t mfn)
     }
 
     return rc;
+#else
+    ASSERT_UNREACHABLE();
+    return -EINVAL;
+#endif
 }
 
 static struct domain *get_pg_owner(domid_t domid)
@@ -3643,6 +3672,8 @@ long do_mmuext_op(
     return rc;
 }
 
+#ifdef CONFIG_PV
+
 long do_mmu_update(
     XEN_GUEST_HANDLE_PARAM(mmu_update_t) ureqs,
     unsigned int count,
@@ -3972,6 +4003,8 @@ long do_mmu_update(
     return rc;
 }
 
+#endif
+
 int donate_page(
     struct domain *d, struct page_info *page, unsigned int memflags)
 {
@@ -4078,6 +4111,8 @@ int steal_page(
     return -EINVAL;
 }
 
+#ifdef CONFIG_PV
+
 static int __do_update_va_mapping(
     unsigned long va, u64 val64, unsigned long flags, struct domain *pg_owner)
 {
@@ -4241,6 +4276,8 @@ int compat_update_va_mapping_otherdomain(unsigned int va,
     return rc;
 }
 
+#endif /* CONFIG_PV */
+
 typedef struct e820entry e820entry_t;
 DEFINE_XEN_GUEST_HANDLE(e820entry_t);
 
-- 
git-series 0.9.1

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

