
[Xen-devel] [PATCH v5 2/5] x86/p2m/pod: make it build with !CONFIG_HVM



Populate-on-demand is HVM only.

Provide a bunch of stubs for common p2m code and guard one invocation
of guest_physmap_mark_populate_on_demand with is_hvm_domain.

Put the PoD-related fields in p2m_domain, and the code which touches
those fields, under CONFIG_HVM.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
v5:
1. Introduce p2m_pod_entry_count
2. Put p2m_pt_audit_p2m under CONFIG_HVM entirely
3. Move p2m_pod_init to p2m-pod.c

v4:
1. Factor out p2m_pod_init.
2. Put audit_p2m under CONFIG_HVM.
3. Get rid of one local variable to simplify code.

v3: Put pod related fields and code under CONFIG_HVM.
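
For reference, the !CONFIG_HVM side follows the usual pattern for
compiling out a subsystem: the real helpers stay under CONFIG_HVM while
inert static inlines take their place otherwise, so common code needs
no #ifdef-ery at call sites. A minimal sketch of the pattern (mirroring
the p2m.h hunk below, using the p2m_pod_entry_count accessor introduced
in v5 as the example):

    #ifdef CONFIG_HVM
    static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
    {
        return p2m->pod.entry_count;
    }
    #else
    /* Without HVM there can be no PoD entries to report. */
    static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
    {
        return 0;
    }
    #endif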
---
 xen/arch/x86/domctl.c     |  2 +-
 xen/arch/x86/mm.c         |  2 ++
 xen/arch/x86/mm/p2m-pod.c | 11 +++++++++
 xen/arch/x86/mm/p2m-pt.c  |  4 ++-
 xen/arch/x86/mm/p2m.c     | 15 ++++++------
 xen/common/memory.c       |  3 +-
 xen/common/vm_event.c     |  4 +---
 xen/include/asm-x86/p2m.h | 52 ++++++++++++++++++++++++++++++++++++----
 8 files changed, 76 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 2284128..115ddf6 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1221,7 +1221,7 @@ long arch_do_domctl(
         ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
         break;
 
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
     case XEN_DOMCTL_audit_p2m:
         if ( d == currd )
             ret = -EPERM;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index af1440d..c8a97e1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4619,6 +4619,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return 0;
     }
 
+#ifdef CONFIG_HVM
     case XENMEM_set_pod_target:
     case XENMEM_get_pod_target:
     {
@@ -4675,6 +4676,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         rcu_unlock_domain(d);
         return rc;
     }
+#endif
 
     default:
         return subarch_memory_op(cmd, arg);
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index ba37344..29c68a4 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -1333,3 +1333,14 @@ out:
     return rc;
 }
 
+void p2m_pod_init(struct p2m_domain *p2m)
+{
+    unsigned int i;
+
+    mm_lock_init(&p2m->pod.lock);
+    INIT_PAGE_LIST_HEAD(&p2m->pod.super);
+    INIT_PAGE_LIST_HEAD(&p2m->pod.single);
+
+    for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
+        p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
+}
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2e..40bfc76 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -967,7 +967,7 @@ static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
     return err;
 }
 
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
 long p2m_pt_audit_p2m(struct p2m_domain *p2m)
 {
     unsigned long entry_count = 0, pmbad = 0;
@@ -1116,6 +1116,8 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
 
     return pmbad;
 }
+#else
+# define p2m_pt_audit_p2m NULL
 #endif /* P2M_AUDIT */
 
 /* Set up the p2m function pointers for pagetable format */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6020553..10ff543 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -52,15 +52,11 @@ DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
 /* Init the datastructures for later use by the p2m code */
 static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 {
-    unsigned int i;
     int ret = 0;
 
     mm_rwlock_init(&p2m->lock);
-    mm_lock_init(&p2m->pod.lock);
     INIT_LIST_HEAD(&p2m->np2m_list);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
-    INIT_PAGE_LIST_HEAD(&p2m->pod.super);
-    INIT_PAGE_LIST_HEAD(&p2m->pod.single);
 
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
@@ -69,8 +65,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
     p2m->np2m_base = P2M_BASE_EADDR;
     p2m->np2m_generation = 0;
 
-    for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
-        p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
+    p2m_pod_init(p2m);
 
     if ( hap_enabled(d) && cpu_has_vmx )
         ret = ept_p2m_init(p2m);
@@ -917,6 +912,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                  gfn_x(gfn), mfn_x(mfn));
         rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
                            p2m_invalid, p2m->default_access);
+#ifdef CONFIG_HVM
         if ( rc == 0 )
         {
             pod_lock(p2m);
@@ -924,6 +920,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
             BUG_ON(p2m->pod.entry_count < 0);
             pod_unlock(p2m);
         }
+#endif
     }
 
 out:
@@ -1114,6 +1111,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
     if ( rc )
         gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
                  gfn_l, order, rc, mfn_x(mfn));
+#ifdef CONFIG_HVM
     else if ( p2m_is_pod(ot) )
     {
         pod_lock(p2m);
@@ -1121,6 +1119,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
         BUG_ON(p2m->pod.entry_count < 0);
         pod_unlock(p2m);
     }
+#endif
     gfn_unlock(p2m, gfn, order);
 
     return rc;
@@ -1743,9 +1742,11 @@ p2m_flush_table_locked(struct p2m_domain *p2m)
      * when discarding them.
      */
     ASSERT(!p2m_is_hostp2m(p2m));
+#ifdef CONFIG_HVM
     /* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/
     ASSERT(page_list_empty(&p2m->pod.super));
     ASSERT(page_list_empty(&p2m->pod.single));
+#endif
 
     /* No need to flush if it's already empty */
     if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
@@ -2539,7 +2540,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
 
 /*** Audit ***/
 
-#if P2M_AUDIT
+#if P2M_AUDIT && defined(CONFIG_HVM)
 void audit_p2m(struct domain *d,
                uint64_t *orphans,
                 uint64_t *m2p_bad,
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 996f94b..5c71ce1 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -210,7 +210,8 @@ static void populate_physmap(struct memop_args *a)
             if ( d == curr_d )
                 goto out;
 
-            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
+            if ( is_hvm_domain(d) &&
+                 guest_physmap_mark_populate_on_demand(d, gpfn,
                                                        a->extent_order) < 0 )
                 goto out;
         }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 4793aac..100da80 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -630,8 +630,6 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
         {
         case XEN_VM_EVENT_ENABLE:
         {
-            struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
             rc = -EOPNOTSUPP;
             /* hvm fixme: p2m_is_foreign types need addressing */
             if ( is_hvm_domain(hardware_domain) )
@@ -649,7 +647,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
 
             rc = -EXDEV;
             /* Disallow paging in a PoD guest */
-            if ( p2m->pod.entry_count )
+            if ( p2m_pod_entry_count(p2m_get_hostp2m(d)) )
                 break;
 
             /* domain_pause() not required here, see XSA-99 */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d4b3cfc..fb42275 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -306,6 +306,7 @@ struct p2m_domain {
      * to resume the search */
     unsigned long next_shared_gfn_to_relinquish;
 
+#ifdef CONFIG_HVM
     /* Populate-on-demand variables
      * All variables are protected with the pod lock. We cannot rely on
      * the p2m lock if it's turned into a fine-grained lock.
@@ -337,6 +338,8 @@ struct p2m_domain {
         mm_lock_t        lock;         /* Locking of private pod structs,   *
                                         * not relying on the p2m lock.      */
     } pod;
+#endif
+
     union {
         struct ept_data ept;
         /* NPT-equivalent structure could be added here. */
@@ -646,6 +649,12 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
 /* Dump PoD information about the domain */
 void p2m_pod_dump_data(struct domain *d);
 
+#ifdef CONFIG_HVM
+
+/* Called by p2m code when demand-populating a PoD page */
+bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
+
 /* Move all pages from the populate-on-demand cache to the domain page_list
  * (usually in preparation for domain destruction) */
 int p2m_pod_empty_cache(struct domain *d);
@@ -662,6 +671,45 @@ p2m_pod_offline_or_broken_hit(struct page_info *p);
 void
 p2m_pod_offline_or_broken_replace(struct page_info *p);
 
+static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
+{
+    return p2m->pod.entry_count;
+}
+
+void p2m_pod_init(struct p2m_domain *p2m);
+
+#else
+
+static inline bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order)
+{
+    return false;
+}
+
+static inline int p2m_pod_empty_cache(struct domain *d)
+{
+    return 0;
+}
+
+static inline int p2m_pod_offline_or_broken_hit(struct page_info *p)
+{
+    return 0;
+}
+
+static inline void p2m_pod_offline_or_broken_replace(struct page_info *p)
+{
+    ASSERT_UNREACHABLE();
+}
+
+static inline long p2m_pod_entry_count(const struct p2m_domain *p2m)
+{
+    return 0;
+}
+
+static inline void p2m_pod_init(struct p2m_domain *p2m) {}
+
+#endif
+
 
 /*
  * Paging to disk and page-sharing
@@ -730,10 +778,6 @@ extern void audit_p2m(struct domain *d,
 #define P2M_DEBUG(f, a...) do { (void)(f); } while(0)
 #endif
 
-/* Called by p2m code when demand-populating a PoD page */
-bool
-p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
-
 /*
  * Functions specific to the p2m-pt implementation
  */
-- 
git-series 0.9.1
