
[Xen-devel] [PATCH v5 2/2] xen: move TLB-flush filtering out into populate_physmap during vm creation



This patch implements part of the TODO left in commit
a902c12ee45fc9389eb8fe54eeddaf267a555c58 ("More efficient TLB-flush
filtering in alloc_heap_pages()") by moving TLB-flush filtering out into
populate_physmap(). Because of the per-allocation TLB flush in
alloc_heap_pages(), it is very slow to create a guest with more than
100GB of memory on a host with 100+ CPUs.

This patch introduces a MEMF_no_tlbflush bit in memflags to indicate
whether the TLB flush should be done in alloc_heap_pages() or in its
caller, populate_physmap(). Once this bit is set in memflags,
alloc_heap_pages() skips the TLB flush. Using this bit after the VM has
been created would be a security issue: it could make pages accessible
to guest B while guest A may still have a cached mapping to them.

Therefore, this patch also introduces a "creation_finished" field in
struct domain to indicate whether the domain has ever been unpaused by
the system controller. MEMF_no_tlbflush may only be set during the VM
creation phase, while creation_finished is still false, i.e. before the
domain is unpaused for the first time.
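
For illustration, here is a condensed sketch of the caller-side pattern
this patch adds (simplified from the populate_physmap() changes in the
diff below; the surrounding extent loop, error handling and declarations
are elided):

    bool need_tlbflush = false;
    uint32_t tlbflush_timestamp = 0;

    /* Only safe while the domain has never been unpaused. */
    if ( unlikely(!d->creation_finished) )
        a->memflags |= MEMF_no_tlbflush;

    /* With this flag, alloc_heap_pages() skips its per-allocation flush. */
    page = alloc_domheap_pages(d, a->extent_order, a->memflags);

    if ( a->memflags & MEMF_no_tlbflush )
    {
        /* Accumulate the newest timestamp instead of flushing right away. */
        for ( j = 0; j < (1U << a->extent_order); j++ )
            if ( accumulate_tlbflush(need_tlbflush, &page[j],
                                     tlbflush_timestamp) )
            {
                need_tlbflush = true;
                tlbflush_timestamp = page[j].tlbflush_timestamp;
            }
    }

    /* ... after all extents have been populated ... */
    if ( need_tlbflush )
        filtered_flush_tlb_mask(tlbflush_timestamp);

This replaces one TLB flush per allocation with a single filtered flush
once the whole physmap population is done.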

Signed-off-by: Dongli Zhang <dongli.zhang@xxxxxxxxxx>
---
Changed since v4:
  * Rename is_ever_unpaused to creation_finished.
  * Change bool_t to bool.
  * Polish comments.

Changed since v3:
  * Set the flag to true in domain_unpause_by_systemcontroller when
    unpausing the guest domain for the first time.
  * Use true/false for all bool_t variables.
  * Add unlikely() to optimize the "if" statement.
  * Correct comment style.

Changed since v2:
  * Limit this optimization to domain creation time.

---
 xen/common/domain.c     |  8 ++++++++
 xen/common/memory.c     | 28 ++++++++++++++++++++++++++++
 xen/common/page_alloc.c |  3 ++-
 xen/include/xen/mm.h    |  2 ++
 xen/include/xen/sched.h |  6 ++++++
 5 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index a8804e4..c170c69 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1004,6 +1004,14 @@ int domain_unpause_by_systemcontroller(struct domain *d)
 {
     int old, new, prev = d->controller_pause_count;
 
+    /*
+     * We record this information here for populate_physmap to figure out
+     * that the domain has finished being created. In fact, we're only
+     * allowed to set the MEMF_no_tlbflush flag during VM creation.
+     */
+    if ( unlikely(!d->creation_finished) )
+        d->creation_finished = true;
+
     do
     {
         old = prev;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index cc0f69e..27d1f2a 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -141,6 +141,8 @@ static void populate_physmap(struct memop_args *a)
     unsigned int i, j;
     xen_pfn_t gpfn, mfn;
     struct domain *d = a->domain, *curr_d = current->domain;
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
 
     if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                      a->nr_extents-1) )
@@ -150,6 +152,17 @@ static void populate_physmap(struct memop_args *a)
                             max_order(curr_d)) )
         return;
 
+    /*
+     * With MEMF_no_tlbflush set, alloc_heap_pages() will ignore
+     * TLB-flushes. After VM creation, this is a security issue (it can
+     * make pages accessible to guest B, when guest A may still have a
+     * cached mapping to them). So we only do this during domain
+     * creation, when the domain itself has not yet been unpaused for the
+     * first time.
+     */
+    if ( unlikely(!d->creation_finished) )
+        a->memflags |= MEMF_no_tlbflush;
+
     for ( i = a->nr_done; i < a->nr_extents; i++ )
     {
         if ( i != a->nr_done && hypercall_preempt_check() )
@@ -214,6 +227,19 @@ static void populate_physmap(struct memop_args *a)
                     goto out;
                 }
 
+                if ( unlikely(a->memflags & MEMF_no_tlbflush) )
+                {
+                    for ( j = 0; j < (1U << a->extent_order); j++ )
+                    {
+                        if ( accumulate_tlbflush(need_tlbflush, &page[j],
+                                                 tlbflush_timestamp) )
+                        {
+                            need_tlbflush = true;
+                            tlbflush_timestamp = page[j].tlbflush_timestamp;
+                        }
+                    }
+                }
+
                 mfn = page_to_mfn(page);
             }
 
@@ -232,6 +258,8 @@ static void populate_physmap(struct memop_args *a)
     }
 
 out:
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
     a->nr_done = i;
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 173c10d..a67f49b 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -827,7 +827,8 @@ static struct page_info *alloc_heap_pages(
         BUG_ON(pg[i].count_info != PGC_state_free);
         pg[i].count_info = PGC_state_inuse;
 
-        if ( accumulate_tlbflush(need_tlbflush, &pg[i],
+        if ( !(memflags & MEMF_no_tlbflush) &&
+             accumulate_tlbflush(need_tlbflush, &pg[i],
                                  tlbflush_timestamp) )
         {
             need_tlbflush = 1;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 85848e3..980b056 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -222,6 +222,8 @@ struct npfec {
 #define  MEMF_exact_node  (1U<<_MEMF_exact_node)
 #define _MEMF_no_owner    5
 #define  MEMF_no_owner    (1U<<_MEMF_no_owner)
+#define _MEMF_no_tlbflush 6
+#define  MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
 #define _MEMF_node        8
 #define  MEMF_node_mask   ((1U << (8 * sizeof(nodeid_t))) - 1)
 #define  MEMF_node(n)     ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2f9c15f..fbcfbd0 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -474,6 +474,12 @@ struct domain
         unsigned int guest_request_enabled       : 1;
         unsigned int guest_request_sync          : 1;
     } monitor;
+
+    /*
+     * Set to true at the very end of domain creation, when the domain is
+     * unpaused for the first time by the systemcontroller.
+     */
+    bool creation_finished;
 };
 
 /* Protect updates/reads (resp.) of domain_list and domain_hash. */
-- 
1.9.1

