
[Xen-devel] [PATCH 2/2] x86/altp2m: fix display frozen when switching to a new view early



When a new altp2m view is created very early in guest boot, the
display will freeze (although the guest will run normally). This
may also happen on resizing the display. The reason is the way
Xen currently (mis)handles logdirty VGA: it intentionally
misconfigures VGA pages so that they will fault.

The problem is that Xen only does this in the host p2m. Once we
switch to a new altp2m view, the misconfigured entries will no
longer fault, so the display will not be updated.
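
Conceptually, the misconfig handler has to recalculate entries in the
p2m the vCPU is actually running on rather than unconditionally in the
host p2m. A simplified sketch of that selection (reusing the existing
altp2m_active()/p2m_get_altp2m() helpers, as done in the first hunk
below):

    struct p2m_domain *p2m = p2m_get_hostp2m(curr->domain);

    /* If the vCPU is running on an altp2m view, recalculate there. */
    if ( altp2m_active(curr->domain) )
        p2m = p2m_get_altp2m(curr);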

This patch:
* updates ept_handle_misconfig() to use the active altp2m instead
  of the hostp2m;
* allocates new logdirty ranges for each altp2m;
* has p2m_init_altp2m_ept() copy over max_mapped_pfn and
  global_logdirty, and merge the logdirty ranges of the
  hostp2m into the logdirty ranges of the altp2m;
* modifies p2m_change_entry_type_global(), p2m_memory_type_changed()
  and p2m_change_type_range() to propagate their changes to all
  valid altp2ms (see the sketch below).
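
The propagation in the last item follows one pattern throughout the
patch: skip views whose EPTP slot is INVALID_MFN and apply the change
to the rest. A minimal sketch of that loop (apply_to_p2m() is a
hypothetical stand-in for the per-p2m operation; the other identifiers
are the existing ones used in the hunks below):

    /* Propagate a change to every valid (in-use) altp2m view. */
    if ( unlikely(altp2m_active(d)) )
    {
        unsigned int i;

        for ( i = 0; i < MAX_ALTP2M; i++ )
            if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
                apply_to_p2m(d->arch.altp2m_p2m[i]); /* hypothetical helper */
    }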

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Suggested-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/arch/x86/mm/p2m-ept.c         |  31 ++++++++++-
 xen/arch/x86/mm/p2m.c             | 112 ++++++++++++++++++++++++++++----------
 xen/drivers/passthrough/pci.c     |   2 +-
 xen/include/asm-x86/hvm/vmx/vmx.h |   3 +-
 xen/include/asm-x86/p2m.h         |  10 ++--
 5 files changed, 123 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index fabcd06..28790bf 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -657,6 +657,9 @@ bool_t ept_handle_misconfig(uint64_t gpa)
     bool_t spurious;
     int rc;
 
+    if ( altp2m_active(curr->domain) )
+        p2m = p2m_get_altp2m(curr);
+
     p2m_lock(p2m);
 
     spurious = curr->arch.hvm.vmx.ept_spurious_misconfig;
@@ -1434,18 +1437,44 @@ void setup_ept_dump(void)
     register_keyhandler('D', ept_dump_p2m_table, "dump VT-x EPT tables", 0);
 }
 
-void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
+int p2m_init_altp2m_ept(struct domain *d, unsigned int i)
 {
     struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
     struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
     struct ept_data *ept;
+    int rc;
+
+    ASSERT(!p2m->sync.logdirty_ranges);
+    p2m->sync.logdirty_ranges = rangeset_new(d, "log-dirty",
+                                             RANGESETF_prettyprint_hex);
+    if ( !p2m->sync.logdirty_ranges )
+        return -ENOMEM;
+
+    rc = rangeset_merge(p2m->sync.logdirty_ranges,
+                        hostp2m->sync.logdirty_ranges);
+    if ( rc )
+        return rc;
 
     p2m->ept.ad = hostp2m->ept.ad;
+    p2m->max_mapped_pfn = hostp2m->max_mapped_pfn;
+    p2m->default_access = hostp2m->default_access;
+    p2m->domain = hostp2m->domain;
+
+    p2m->sync.global_logdirty = hostp2m->sync.global_logdirty;
     p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
     p2m->max_remapped_gfn = 0;
     ept = &p2m->ept;
     ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
     d->arch.altp2m_eptp[i] = ept->eptp;
+
+    return 0;
+}
+
+void p2m_uninit_altp2m_ept(struct p2m_domain *p2m)
+{
+    /* rangeset_destroy() copes with a NULL rangeset (never-activated view). */
+    rangeset_destroy(p2m->sync.logdirty_ranges);
+    p2m->sync.logdirty_ranges = NULL;
 }
 
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 42b9ef4..e9f8385 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -28,6 +28,7 @@
 #include <xen/vm_event.h>
 #include <xen/event.h>
 #include <public/vm_event.h>
+#include <asm/altp2m.h>
 #include <asm/domain.h>
 #include <asm/page.h>
 #include <asm/paging.h>
@@ -119,9 +120,9 @@ static int p2m_init_hostp2m(struct domain *d)
 
     if ( p2m )
     {
-        p2m->logdirty_ranges = rangeset_new(d, "log-dirty",
-                                            RANGESETF_prettyprint_hex);
-        if ( p2m->logdirty_ranges )
+        p2m->sync.logdirty_ranges = rangeset_new(d, "log-dirty",
+                                                 RANGESETF_prettyprint_hex);
+        if ( p2m->sync.logdirty_ranges )
         {
             d->arch.p2m = p2m;
             return 0;
@@ -138,7 +139,7 @@ static void p2m_teardown_hostp2m(struct domain *d)
 
     if ( p2m )
     {
-        rangeset_destroy(p2m->logdirty_ranges);
+        rangeset_destroy(p2m->sync.logdirty_ranges);
         p2m_free_one(p2m);
         d->arch.p2m = NULL;
     }
@@ -193,6 +194,7 @@ static void p2m_teardown_altp2m(struct domain *d)
         if ( !d->arch.altp2m_p2m[i] )
             continue;
         p2m = d->arch.altp2m_p2m[i];
+        p2m_uninit_altp2m_ept(p2m);
         d->arch.altp2m_p2m[i] = NULL;
         p2m_free_one(p2m);
     }
@@ -255,33 +257,55 @@ int p2m_init(struct domain *d)
 int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start,
                           unsigned long end)
 {
-    ASSERT(p2m_is_hostp2m(p2m));
-    if ( p2m->global_logdirty ||
-         rangeset_contains_range(p2m->logdirty_ranges, start, end) )
+    if ( p2m->sync.global_logdirty ||
+         rangeset_contains_range(p2m->sync.logdirty_ranges, start, end) )
         return 1;
-    if ( rangeset_overlaps_range(p2m->logdirty_ranges, start, end) )
+    if ( rangeset_overlaps_range(p2m->sync.logdirty_ranges, start, end) )
         return -1;
     return 0;
 }
 
+static void _p2m_change_entry_type_global(struct p2m_domain *p2m,
+                                          p2m_type_t ot, p2m_type_t nt)
+{
+    p2m->change_entry_type_global(p2m, ot, nt);
+    p2m->sync.global_logdirty = (nt == p2m_ram_logdirty);
+}
+
 void p2m_change_entry_type_global(struct domain *d,
                                   p2m_type_t ot, p2m_type_t nt)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
 
     ASSERT(ot != nt);
     ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
 
-    p2m_lock(p2m);
-    p2m->change_entry_type_global(p2m, ot, nt);
-    p2m->global_logdirty = (nt == p2m_ram_logdirty);
-    p2m_unlock(p2m);
+    p2m_lock(hostp2m);
+
+    _p2m_change_entry_type_global(hostp2m, ot, nt);
+
+#ifdef CONFIG_HVM
+    if ( unlikely(altp2m_active(d)) )
+    {
+        unsigned int i;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+            if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+            {
+                struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+
+                p2m_lock(p2m);
+                _p2m_change_entry_type_global(p2m, ot, nt);
+                p2m_unlock(p2m);
+            }
+    }
+#endif
+
+    p2m_unlock(hostp2m);
 }
 
-void p2m_memory_type_changed(struct domain *d)
+static void _p2m_memory_type_changed(struct p2m_domain *p2m)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-
     if ( p2m->memory_type_changed )
     {
         p2m_lock(p2m);
@@ -290,6 +314,22 @@ void p2m_memory_type_changed(struct domain *d)
     }
 }
 
+void p2m_memory_type_changed(struct domain *d)
+{
+#ifdef CONFIG_HVM
+    if ( unlikely(altp2m_active(d)) )
+    {
+        unsigned int i;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+            if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+                _p2m_memory_type_changed(d->arch.altp2m_p2m[i]);
+    }
+#endif
+
+    _p2m_memory_type_changed(p2m_get_hostp2m(d));
+}
+
 int p2m_set_ioreq_server(struct domain *d,
                          unsigned int flags,
                          struct hvm_ioreq_server *s)
@@ -970,12 +1010,12 @@ int p2m_change_type_one(struct domain *d, unsigned long gfn_l,
 }
 
 /* Modify the p2m type of a range of gfns from ot to nt. */
-void p2m_change_type_range(struct domain *d, 
-                           unsigned long start, unsigned long end,
-                           p2m_type_t ot, p2m_type_t nt)
+static void _p2m_change_type_range(struct p2m_domain *p2m,
+                                   unsigned long start, unsigned long end,
+                                   p2m_type_t ot, p2m_type_t nt)
 {
+    struct domain *d = p2m->domain;
     unsigned long gfn = start;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     int rc = 0;
 
     ASSERT(ot != nt);
@@ -1006,11 +1046,11 @@ void p2m_change_type_range(struct domain *d,
     {
     case p2m_ram_rw:
         if ( ot == p2m_ram_logdirty )
-            rc = rangeset_remove_range(p2m->logdirty_ranges, start, end - 1);
+            rc = rangeset_remove_range(p2m->sync.logdirty_ranges, start, end - 1);
         break;
     case p2m_ram_logdirty:
         if ( ot == p2m_ram_rw )
-            rc = rangeset_add_range(p2m->logdirty_ranges, start, end - 1);
+            rc = rangeset_add_range(p2m->sync.logdirty_ranges, start, end - 1);
         break;
     default:
         break;
@@ -1028,6 +1068,25 @@ void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
+void p2m_change_type_range(struct domain *d,
+                           unsigned long start, unsigned long end,
+                           p2m_type_t ot, p2m_type_t nt)
+{
+#ifdef CONFIG_HVM
+    if ( unlikely(altp2m_active(d)) )
+    {
+        unsigned int i;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+            if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+                _p2m_change_type_range(d->arch.altp2m_p2m[i], start, end, ot,
+                                       nt);
+    }
+#endif
+
+    _p2m_change_type_range(p2m_get_hostp2m(d), start, end, ot, nt);
+}
+
 /*
  * Finish p2m type change for gfns which are marked as need_recalc in a range.
  * Returns: 0/1 for success, negative for failure
@@ -2289,10 +2348,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
     altp2m_list_lock(d);
 
     if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
-    {
-        p2m_init_altp2m_ept(d, idx);
-        rc = 0;
-    }
+        rc = p2m_init_altp2m_ept(d, idx);
 
     altp2m_list_unlock(d);
     return rc;
@@ -2310,9 +2366,8 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
         if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
             continue;
 
-        p2m_init_altp2m_ept(d, i);
+        rc = p2m_init_altp2m_ept(d, i);
         *idx = i;
-        rc = 0;
 
         break;
     }
@@ -2341,6 +2396,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
         {
             p2m_flush_table(d->arch.altp2m_p2m[idx]);
             /* Uninit and reinit ept to force TLB shootdown */
+            p2m_uninit_altp2m_ept(d->arch.altp2m_p2m[idx]);
             ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
             ept_p2m_init(d->arch.altp2m_p2m[idx]);
             d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index e5b9602..390c748 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1418,7 +1418,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
      * enabled for this domain */
     if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
                   vm_event_check_ring(d->vm_event_paging) ||
-                  p2m_get_hostp2m(d)->global_logdirty) )
+                  p2m_get_hostp2m(d)->sync.global_logdirty) )
         return -EXDEV;
 
     if ( !pcidevs_trylock() )
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index b110e16..b2a1094 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -598,7 +598,8 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
 void ept_walk_table(struct domain *d, unsigned long gfn);
 bool_t ept_handle_misconfig(uint64_t gpa);
 void setup_ept_dump(void);
-void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
+int p2m_init_altp2m_ept(struct domain *d, unsigned int i);
+void p2m_uninit_altp2m_ept(struct p2m_domain *p2m);
 /* Locate an alternate p2m by its EPTP */
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
 
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d08c595..7346eeb 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -219,11 +219,13 @@ struct p2m_domain {
     struct list_head   np2m_list;
 #endif
 
-    /* Host p2m: Log-dirty ranges registered for the domain. */
-    struct rangeset   *logdirty_ranges;
+    struct {
+        /* Log-dirty ranges registered for the domain. */
+        struct rangeset   *logdirty_ranges;
 
-    /* Host p2m: Global log-dirty mode enabled for the domain. */
-    bool_t             global_logdirty;
+        /* Global log-dirty mode enabled for the domain. */
+        bool               global_logdirty;
+    } sync;
 
     /* Host p2m: when this flag is set, don't flush all the nested-p2m 
      * tables on every host-p2m change.  The setter of this flag 
-- 
2.7.4

