
[Xen-changelog] [xen master] x86/ioreq server: synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps



commit 6d774a951696b1d8dd2dfba00441f6892c9db815
Author:     Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
AuthorDate: Fri Apr 7 17:40:04 2017 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Apr 7 17:40:04 2017 +0200

    x86/ioreq server: synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps
    
    After an ioreq server has unmapped, the remaining p2m_ioreq_server
    entries need to be reset back to p2m_ram_rw. This patch does this
    synchronously by iterating over the p2m table.

    The synchronous reset is necessary to guarantee that the p2m table
    is clean before another ioreq server can be mapped. Since sweeping
    the p2m table can be time-consuming, it is done with hypercall
    continuation.
    
    Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
 xen/arch/x86/hvm/dm.c     | 41 ++++++++++++++++++++++++++++++++++++++---
 xen/arch/x86/mm/p2m.c     | 29 +++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h |  6 ++++++
 3 files changed, 73 insertions(+), 3 deletions(-)
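
For readers unfamiliar with the idiom, the continuation logic added to
dm.c below reduces to the following shape (a minimal sketch, not the
exact patch code; work_remaining() and process_chunk() are hypothetical
stand-ins). Progress is stashed in data->opaque, which is zero on first
entry, and -ERESTART tells Xen to re-issue the dm_op so the sweep
resumes from the saved gfn:

    /* Sketch only: work_remaining()/process_chunk() are hypothetical. */
    unsigned long first_gfn = data->opaque;   /* 0 on the first entry */

    while ( work_remaining(first_gfn) )
    {
        process_chunk(first_gfn, 256);        /* bounded batch of gfns */
        first_gfn += 256;

        /* Yield if more work remains and preemption is pending. */
        if ( work_remaining(first_gfn) && hypercall_preempt_check() )
        {
            data->opaque = first_gfn;         /* save progress */
            rc = -ERESTART;                   /* Xen re-issues the dm_op */
            break;
        }
    }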

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 7e0da81..d72b7bd 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -384,15 +384,50 @@ static int dm_op(domid_t domid,
 
     case XEN_DMOP_map_mem_type_to_ioreq_server:
     {
-        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
+        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
             &op.u.map_mem_type_to_ioreq_server;
+        unsigned long first_gfn = data->opaque;
+
+        const_op = false;
 
         rc = -EOPNOTSUPP;
         if ( !hap_enabled(d) )
             break;
 
-        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
-                                              data->type, data->flags);
+        if ( first_gfn == 0 )
+            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
+                                                  data->type, data->flags);
+        else
+            rc = 0;
+
+        /*
+         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
+         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
+         */
+        if ( rc == 0 && data->flags == 0 )
+        {
+            struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+            while ( read_atomic(&p2m->ioreq.entry_count) &&
+                    first_gfn <= p2m->max_mapped_pfn )
+            {
+                /* Iterate p2m table for 256 gfns each time. */
+                p2m_finish_type_change(d, _gfn(first_gfn), 256,
+                                       p2m_ioreq_server, p2m_ram_rw);
+
+                first_gfn += 256;
+
+                /* Check for continuation if it's not the last iteration. */
+                if ( first_gfn <= p2m->max_mapped_pfn &&
+                     hypercall_preempt_check() )
+                {
+                    rc = -ERESTART;
+                    data->opaque = first_gfn;
+                    break;
+                }
+            }
+        }
+
         break;
     }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4169d18..1d57e5c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1011,6 +1011,35 @@ void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
+/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
+void p2m_finish_type_change(struct domain *d,
+                            gfn_t first_gfn, unsigned long max_nr,
+                            p2m_type_t ot, p2m_type_t nt)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_type_t t;
+    unsigned long gfn = gfn_x(first_gfn);
+    unsigned long last_gfn = gfn + max_nr - 1;
+
+    ASSERT(ot != nt);
+    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
+
+    p2m_lock(p2m);
+
+    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
+    while ( gfn <= last_gfn )
+    {
+        get_gfn_query_unlocked(d, gfn, &t);
+
+        if ( t == ot )
+            p2m_change_type_one(d, gfn, t, nt);
+
+        gfn++;
+    }
+
+    p2m_unlock(p2m);
+}
+
 /*
  * Returns:
  *    0              for success
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index f27b3a6..7574a9b 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -606,6 +606,12 @@ void p2m_change_type_range(struct domain *d,
 int p2m_change_type_one(struct domain *d, unsigned long gfn,
                         p2m_type_t ot, p2m_type_t nt);
 
+/* Synchronously change the p2m type for a range of gfns */
+void p2m_finish_type_change(struct domain *d,
+                            gfn_t first_gfn,
+                            unsigned long max_nr,
+                            p2m_type_t ot, p2m_type_t nt);
+
 /* Report a change affecting memory types. */
 void p2m_memory_type_changed(struct domain *d);
 
--
generated by git-patchbot for /home/xen/git/xen.git#master

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog