
Re: [Xen-devel] [PATCH v3 07/16] x86/p2m/pod: make it build with !CONFIG_HVM



Hi Wei,

On 09/04/2018 05:15 PM, Wei Liu wrote:
Populate-on-demand is HVM only.

Provide a bunch of stubs for common p2m code and guard one invocation
of guest_physmap_mark_populate_on_demand with is_hvm_domain.

Put relevant fields in p2m_domain and code which touches those fields
under CONFIG_HVM.

Arm does not have any PoD support. Would it be worth introducing a CONFIG_HAS_POD to avoid having to provide dummy functions on Arm?
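
Something like the below, perhaps (a completely untested sketch -- the
symbol name is the one suggested above, but the placement in common
code and the use of -EOPNOTSUPP are only for illustration, not taken
from this series):

config HAS_POD
        bool

...with x86 selecting it from CONFIG_HVM, and a common header providing
the fallback so each arch doesn't need its own stub:

#ifdef CONFIG_HAS_POD
int guest_physmap_mark_populate_on_demand(struct domain *d,
                                          unsigned long gfn,
                                          unsigned int order);
#else
static inline int
guest_physmap_mark_populate_on_demand(struct domain *d,
                                      unsigned long gfn,
                                      unsigned int order)
{
    /* No PoD support built in: fail the request. */
    return -EOPNOTSUPP;
}
#endif

With a stub along those lines returning a negative value, the caller in
populate_physmap() would arguably not even need the is_hvm_domain()
check.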

Cheers,


Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
v3: Put pod related fields and code under CONFIG_HVM.

Note, this should be applied after the toolstack side change.
---
  xen/arch/x86/mm.c         |  2 ++
  xen/arch/x86/mm/p2m-pt.c  |  4 ++++
  xen/arch/x86/mm/p2m.c     | 22 ++++++++++++++++++++--
  xen/common/memory.c       |  3 ++-
  xen/common/vm_event.c     |  4 ++++
  xen/include/asm-x86/p2m.h | 38 ++++++++++++++++++++++++++++++++++----
  6 files changed, 66 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index baea2f5e63..54023e0c69 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4588,6 +4588,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
          return 0;
      }
+#ifdef CONFIG_HVM
      case XENMEM_set_pod_target:
      case XENMEM_get_pod_target:
      {
@@ -4644,6 +4645,7 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
          rcu_unlock_domain(d);
          return rc;
      }
+#endif
      default:
          return subarch_memory_op(cmd, arg);
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2ed26..74884ea063 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -974,7 +974,9 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
      unsigned long mfn, gfn, m2pfn;
ASSERT(p2m_locked_by_me(p2m));
+#ifdef CONFIG_HVM
      ASSERT(pod_locked_by_me(p2m));
+#endif
/* Audit part one: walk the domain's p2m table, checking the entries. */
      if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
@@ -1105,6 +1107,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
          unmap_domain_page(l4e);
      }
+#ifdef CONFIG_HVM
      if ( entry_count != p2m->pod.entry_count )
      {
          printk("%s: refcounted entry count %ld, audit count %lu!\n",
@@ -1113,6 +1116,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                 entry_count);
          BUG();
      }
+#endif
return pmbad;
  }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 6020553c17..79d0e7203a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -52,15 +52,20 @@ DEFINE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
  /* Init the datastructures for later use by the p2m code */
  static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
  {
-    unsigned int i;
      int ret = 0;
+#ifdef CONFIG_HVM
+    unsigned int i;
+#endif
mm_rwlock_init(&p2m->lock);
-    mm_lock_init(&p2m->pod.lock);
      INIT_LIST_HEAD(&p2m->np2m_list);
      INIT_PAGE_LIST_HEAD(&p2m->pages);
+
+#ifdef CONFIG_HVM
+    mm_lock_init(&p2m->pod.lock);
      INIT_PAGE_LIST_HEAD(&p2m->pod.super);
      INIT_PAGE_LIST_HEAD(&p2m->pod.single);
+#endif
p2m->domain = d;
      p2m->default_access = p2m_access_rwx;
@@ -69,8 +74,10 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
      p2m->np2m_base = P2M_BASE_EADDR;
      p2m->np2m_generation = 0;
+#ifdef CONFIG_HVM
      for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
          p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
+#endif
if ( hap_enabled(d) && cpu_has_vmx )
          ret = ept_p2m_init(p2m);
@@ -917,6 +924,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                   gfn_x(gfn), mfn_x(mfn));
          rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
                             p2m_invalid, p2m->default_access);
+#ifdef CONFIG_HVM
          if ( rc == 0 )
          {
              pod_lock(p2m);
@@ -924,6 +932,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
              BUG_ON(p2m->pod.entry_count < 0);
              pod_unlock(p2m);
          }
+#endif
      }
out:
@@ -1114,6 +1123,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
      if ( rc )
          gdprintk(XENLOG_ERR, "p2m_set_entry: %#lx:%u -> %d (0x%"PRI_mfn")\n",
                   gfn_l, order, rc, mfn_x(mfn));
+#ifdef CONFIG_HVM
      else if ( p2m_is_pod(ot) )
      {
          pod_lock(p2m);
@@ -1121,6 +1131,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn_l,
          BUG_ON(p2m->pod.entry_count < 0);
          pod_unlock(p2m);
      }
+#endif
      gfn_unlock(p2m, gfn, order);
return rc;
@@ -1743,9 +1754,11 @@ p2m_flush_table_locked(struct p2m_domain *p2m)
       * when discarding them.
       */
      ASSERT(!p2m_is_hostp2m(p2m));
+#ifdef CONFIG_HVM
      /* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/
      ASSERT(page_list_empty(&p2m->pod.super));
      ASSERT(page_list_empty(&p2m->pod.single));
+#endif
/* No need to flush if it's already empty */
      if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
@@ -2560,7 +2573,10 @@ void audit_p2m(struct domain *d,
      P2M_PRINTK("p2m audit starts\n");
p2m_lock(p2m);
+
+#ifdef CONFIG_HVM
      pod_lock(p2m);
+#endif
if (p2m->audit_p2m)
          pmbad = p2m->audit_p2m(p2m);
@@ -2621,7 +2637,9 @@ void audit_p2m(struct domain *d,
      }
      spin_unlock(&d->page_alloc_lock);
+#ifdef CONFIG_HVM
      pod_unlock(p2m);
+#endif
      p2m_unlock(p2m);
P2M_PRINTK("p2m audit complete\n");
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 996f94b103..5c71ce13ce 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -210,7 +210,8 @@ static void populate_physmap(struct memop_args *a)
              if ( d == curr_d )
                  goto out;
-            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
+            if ( is_hvm_domain(d) &&
+                 guest_physmap_mark_populate_on_demand(d, gpfn,
                                                         a->extent_order) < 0 )
                  goto out;
          }
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 4793aacc35..a3bbfc9474 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -630,7 +630,9 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
          {
          case XEN_VM_EVENT_ENABLE:
          {
+#ifdef CONFIG_HVM
              struct p2m_domain *p2m = p2m_get_hostp2m(d);
+#endif
rc = -EOPNOTSUPP;
              /* hvm fixme: p2m_is_foreign types need addressing */
@@ -647,10 +649,12 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
              if ( unlikely(need_iommu(d)) )
                  break;
+#ifdef CONFIG_HVM
              rc = -EXDEV;
              /* Disallow paging in a PoD guest */
              if ( p2m->pod.entry_count )
                  break;
+#endif
/* domain_pause() not required here, see XSA-99 */
              rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d4b3cfcb6e..3785598f54 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -306,6 +306,7 @@ struct p2m_domain {
       * to resume the search */
      unsigned long next_shared_gfn_to_relinquish;
+#ifdef CONFIG_HVM
      /* Populate-on-demand variables
       * All variables are protected with the pod lock. We cannot rely on
       * the p2m lock if it's turned into a fine-grained lock.
@@ -337,6 +338,8 @@ struct p2m_domain {
          mm_lock_t        lock;         /* Locking of private pod structs,   *
                                          * not relying on the p2m lock.      */
      } pod;
+#endif
+
      union {
          struct ept_data ept;
          /* NPT-equivalent structure could be added here. */
@@ -646,6 +649,12 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
  /* Dump PoD information about the domain */
  void p2m_pod_dump_data(struct domain *d);
+#ifdef CONFIG_HVM
+
+/* Called by p2m code when demand-populating a PoD page */
+bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
+
  /* Move all pages from the populate-on-demand cache to the domain page_list
   * (usually in preparation for domain destruction) */
  int p2m_pod_empty_cache(struct domain *d);
@@ -662,6 +671,31 @@ p2m_pod_offline_or_broken_hit(struct page_info *p);
  void
  p2m_pod_offline_or_broken_replace(struct page_info *p);
+#else
+
+static inline bool
+p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order)
+{
+    return false;
+}
+
+static inline int p2m_pod_empty_cache(struct domain *d)
+{
+    return 0;
+}
+
+static inline int p2m_pod_offline_or_broken_hit(struct page_info *p)
+{
+    return 0;
+}
+
+static inline void p2m_pod_offline_or_broken_replace(struct page_info *p)
+{
+    ASSERT_UNREACHABLE();
+}
+
+#endif
+
/*
   * Paging to disk and page-sharing
@@ -730,10 +764,6 @@ extern void audit_p2m(struct domain *d,
  #define P2M_DEBUG(f, a...) do { (void)(f); } while(0)
  #endif
-/* Called by p2m code when demand-populating a PoD page */
-bool
-p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn, unsigned int order);
-
  /*
   * Functions specific to the p2m-pt implementation
   */


--
Julien Grall
