
[Xen-devel] [PATCH v4 4/7] x86/mm: put HVM only code under CONFIG_HVM



Going through the code, HAP, EPT, PoD and ALTP2M depend on HVM code.
Put these components under CONFIG_HVM. This further requires putting
part of the vm_event handling code under CONFIG_HVM as well.

Altp2m requires a bit more attention because its code is embedded in
generic x86 p2m code.
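
Because altp2m calls remain in code that is still built without
CONFIG_HVM, the !CONFIG_HVM side uses two stub styles. A minimal,
self-contained sketch of the effect of each (opaque placeholder types,
not the real Xen declarations):

    #include <stdint.h>

    struct vcpu;            /* opaque placeholder for the sketch */
    struct p2m_domain;      /* opaque placeholder for the sketch */

    /*
     * Style 1: empty inline stub.  Callers that are still compiled
     * without CONFIG_HVM keep building and the call becomes a no-op.
     */
    static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}

    /*
     * Style 2: declaration without a !CONFIG_HVM definition.  The header
     * still compiles, but any caller that was not itself compiled out
     * fails at link time, flagging a missed #ifdef instead of silently
     * doing nothing.
     */
    struct p2m_domain *p2m_get_altp2m(struct vcpu *v);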

Also make hap_enabled() evaluate to false when !CONFIG_HVM. Make sure it
still evaluates its parameter, to avoid unused variable warnings in its
callers.
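
A minimal stand-alone sketch of why the stub has to keep evaluating its
argument; the caller and its names below are made up for illustration,
and GCC statement expressions are assumed (as in the actual macro):

    #include <stdbool.h>

    struct domain { int id; };          /* placeholder type for the sketch */

    /* Stub in the style of this patch: evaluate d, then yield false. */
    #define hap_enabled(d)  ({ (void)(d); false; })

    /*
     * A caller whose only use of 'd' is the hap_enabled() check.  With a
     * plain '#define hap_enabled(d) false' stub, the compiler would warn
     * that 'd' is unused in !CONFIG_HVM builds.
     */
    static bool wants_hap(struct domain *current_domain)
    {
        struct domain *d = current_domain;

        return hap_enabled(d);
    }

    int main(void)
    {
        struct domain dom = { .id = 0 };

        return wants_hap(&dom) ? 1 : 0;
    }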

While at it, sort the items in the Makefile.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: provide stub for p2m_altp2m_check

Razvan's ack is dropped because of the change. An ack from altp2m
maintainers is required.
---
 xen/arch/x86/mm/Makefile         | 11 ++++++-----
 xen/arch/x86/mm/mem_access.c     | 18 +++++++++++++++++-
 xen/arch/x86/mm/mem_sharing.c    |  2 ++
 xen/arch/x86/mm/p2m.c            | 23 ++++++++++++-----------
 xen/include/asm-x86/altp2m.h     | 13 ++++++++++++-
 xen/include/asm-x86/domain.h     |  2 +-
 xen/include/asm-x86/hvm/domain.h |  4 ++++
 xen/include/asm-x86/p2m.h        |  8 +++++++-
 8 files changed, 61 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/mm/Makefile b/xen/arch/x86/mm/Makefile
index 3017119..171cc74 100644
--- a/xen/arch/x86/mm/Makefile
+++ b/xen/arch/x86/mm/Makefile
@@ -1,15 +1,16 @@
 subdir-y += shadow
-subdir-y += hap
+subdir-$(CONFIG_HVM) += hap
 
-obj-y += paging.o
-obj-y += p2m.o p2m-pt.o p2m-ept.o p2m-pod.o
-obj-y += altp2m.o
+obj-$(CONFIG_HVM) += altp2m.o
 obj-y += guest_walk_2.o
 obj-y += guest_walk_3.o
 obj-y += guest_walk_4.o
+obj-$(CONFIG_MEM_ACCESS) += mem_access.o
 obj-y += mem_paging.o
 obj-y += mem_sharing.o
-obj-y += mem_access.o
+obj-y += p2m.o p2m-pt.o
+obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
+obj-y += paging.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index c980f17..6801841 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -246,7 +246,6 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
     /* Return whether vCPU pause is required (aka. sync event) */
     return (p2ma != p2m_access_n2rwx);
 }
-#endif
 
 int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                               struct p2m_domain *ap2m, p2m_access_t a,
@@ -291,6 +290,7 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
      */
     return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
 }
+#endif
 
 static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                           struct p2m_domain *ap2m, p2m_access_t a,
@@ -298,6 +298,7 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
 {
     int rc = 0;
 
+#ifdef CONFIG_HVM
     if ( ap2m )
     {
         rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
@@ -306,6 +307,9 @@ static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
             rc = 0;
     }
     else
+#else
+    ASSERT(!ap2m);
+#endif
     {
         mfn_t mfn;
         p2m_access_t _a;
@@ -367,6 +371,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     long rc = 0;
 
     /* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
@@ -375,6 +380,9 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
         ap2m = d->arch.altp2m_p2m[altp2m_idx];
     }
+#else
+    ASSERT(!altp2m_idx);
+#endif
 
     if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
         return -EINVAL;
@@ -422,6 +430,7 @@ long p2m_set_mem_access_multi(struct domain *d,
     long rc = 0;
 
     /* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
@@ -430,6 +439,9 @@ long p2m_set_mem_access_multi(struct domain *d,
 
         ap2m = d->arch.altp2m_p2m[altp2m_idx];
     }
+#else
+    ASSERT(!altp2m_idx);
+#endif
 
     p2m_lock(p2m);
     if ( ap2m )
@@ -483,12 +495,15 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
 
 void arch_p2m_set_access_required(struct domain *d, bool access_required)
 {
+#ifdef CONFIG_HVM
     unsigned int i;
+#endif
 
     ASSERT(atomic_read(&d->pause_count));
 
     p2m_get_hostp2m(d)->access_required = access_required;
 
+#ifdef CONFIG_HVM
     if ( !altp2m_active(d) )
         return;
 
@@ -499,6 +514,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
         if ( p2m )
             p2m->access_required = access_required;
     }
+#endif
 }
 
 /*
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index d04f9c7..349e6fd 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -802,6 +802,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
     if ( !p2m_is_sharable(p2mt) )
         goto out;
 
+#ifdef CONFIG_HVM
     /* Check if there are mem_access/remapped altp2m entries for this page */
     if ( altp2m_active(d) )
     {
@@ -829,6 +830,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
 
         altp2m_list_unlock(d);
     }
+#endif
 
     /* Try to convert the mfn to the sharable type */
     page = mfn_to_page(mfn);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4169084..963fde9 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -196,7 +196,6 @@ static int p2m_init_nestedp2m(struct domain *d)
 
     return 0;
 }
-#endif
 
 static void p2m_teardown_altp2m(struct domain *d)
 {
@@ -235,6 +234,7 @@ static int p2m_init_altp2m(struct domain *d)
 
     return 0;
 }
+#endif
 
 int p2m_init(struct domain *d)
 {
@@ -254,16 +254,14 @@ int p2m_init(struct domain *d)
         p2m_teardown_hostp2m(d);
         return rc;
     }
-#endif
 
     rc = p2m_init_altp2m(d);
     if ( rc )
     {
         p2m_teardown_hostp2m(d);
-#ifdef CONFIG_HVM
         p2m_teardown_nestedp2m(d);
-#endif
     }
+#endif
 
     return rc;
 }
@@ -709,12 +707,12 @@ void p2m_teardown(struct p2m_domain *p2m)
 
 void p2m_final_teardown(struct domain *d)
 {
+#ifdef CONFIG_HVM
     /*
      * We must teardown both of them unconditionally because
      * we initialise them unconditionally.
      */
     p2m_teardown_altp2m(d);
-#ifdef CONFIG_HVM
     p2m_teardown_nestedp2m(d);
 #endif
 
@@ -1736,12 +1734,6 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
     }
 }
 
-void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
-{
-    if ( altp2m_active(v->domain) )
-        p2m_switch_vcpu_altp2m_by_id(v, idx);
-}
-
 #ifdef CONFIG_HVM
 static struct p2m_domain *
 p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
@@ -2191,6 +2183,14 @@ int unmap_mmio_regions(struct domain *d,
     return i == nr ? 0 : i ?: ret;
 }
 
+#ifdef CONFIG_HVM
+
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+    if ( altp2m_active(v->domain) )
+        p2m_switch_vcpu_altp2m_by_id(v, idx);
+}
+
 bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 {
     struct domain *d = v->domain;
@@ -2568,6 +2568,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
 
     return ret;
 }
+#endif /* CONFIG_HVM */
 
 /*** Audit ***/
 
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 64c7618..41fdd82 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -18,12 +18,14 @@
 #ifndef __ASM_X86_ALTP2M_H
 #define __ASM_X86_ALTP2M_H
 
+#ifdef CONFIG_HVM
+
 #include <xen/types.h>
 #include <xen/sched.h>         /* for struct vcpu, struct domain */
 #include <asm/hvm/vcpu.h>      /* for vcpu_altp2m */
 
 /* Alternate p2m HVM on/off per domain */
-static inline bool_t altp2m_active(const struct domain *d)
+static inline bool altp2m_active(const struct domain *d)
 {
     return d->arch.altp2m_active;
 }
@@ -37,5 +39,14 @@ static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
 {
     return vcpu_altp2m(v).p2midx;
 }
+#else
+
+static inline bool altp2m_active(const struct domain *d)
+{
+    return false;
+}
+
+uint16_t altp2m_vcpu_idx(const struct vcpu *v);
+#endif
 
 #endif /* __ASM_X86_ALTP2M_H */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index b46cfb0..cb0721e 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -337,13 +337,13 @@ struct arch_domain
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
     mm_lock_t nested_p2m_lock;
-#endif
 
     /* altp2m: allow multiple copies of host p2m */
     bool_t altp2m_active;
     struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
     mm_lock_t altp2m_list_lock;
     uint64_t *altp2m_eptp;
+#endif
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index fa7ebb9..172d27f 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -194,7 +194,11 @@ struct hvm_domain {
     };
 };
 
+#ifdef CONFIG_HVM
 #define hap_enabled(d)  ((d)->arch.hvm.hap_enabled)
+#else
+#define hap_enabled(d)  ({(void)(d); false;})
+#endif
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 20cf3f1..1db603b 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -231,8 +231,10 @@ struct p2m_domain {
      * host p2m's lock. */
     int                defer_nested_flush;
 
+#ifdef CONFIG_HVM
     /* Alternate p2m: count of vcpu's currently using this p2m. */
     atomic_t           active_vcpus;
+#endif
 
     /* Pages used to construct the p2m */
     struct page_list_head pages;
@@ -823,7 +825,7 @@ void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
 /*
  * Alternate p2m: shadow p2m tables used for alternate memory views
  */
-
+#ifdef CONFIG_HVM
 /* get current alternate p2m table */
 static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 {
@@ -870,6 +872,10 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
 int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
                                 mfn_t mfn, unsigned int page_order,
                                 p2m_type_t p2mt, p2m_access_t p2ma);
+#else
+struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
+static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}
+#endif
 
 /*
  * p2m type to IOMMU flags
-- 
git-series 0.9.1
