[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 1/2] xen/x86: ensure copying runstate/time to L1 rather than L2



For an HVM domain, if a vcpu is in nested guest mode,
__raw_copy_to_guest(), __copy_to_guest() and __copy_field_to_guest()
used by update_runstate_area() and update_secondary_system_time() will
copy data to L2 guest rather than the L1 guest.

This commit temporarily clears the nested guest flag before all guest
copies in update_runstate_area() and update_secondary_system_time(),
and restores the flag after those guest copy operations.

The flag clear/restore is combined with the existing
smap_policy_change() which is renamed to update_guest_memory_policy().

Signed-off-by: Haozhong Zhang <haozhong.zhang@xxxxxxxxx>
---
Changes since v2:
 * Combine the fix with existing smap_policy_change() which is renamed to
   update_guest_memory_policy().
---
 xen/arch/x86/domain.c        | 39 +++++++++++++++++++++++++++++++--------
 xen/arch/x86/time.c          |  9 +++++----
 xen/include/asm-x86/domain.h | 10 ++++++++--
 3 files changed, 44 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7d3071e..0bf1909 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -50,6 +50,7 @@
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
 #include <asm/hvm/hvm.h>
+#include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/viridian.h>
 #include <asm/debugreg.h>
@@ -200,12 +201,33 @@ void dump_pageframe_info(struct domain *d)
     spin_unlock(&d->page_alloc_lock);
 }
 
-smap_check_policy_t smap_policy_change(struct vcpu *v,
-    smap_check_policy_t new_policy)
+void update_guest_memory_policy(struct vcpu *v,
+                                struct guest_memory_policy *policy)
 {
-    smap_check_policy_t old_policy = v->arch.smap_check_policy;
-    v->arch.smap_check_policy = new_policy;
-    return old_policy;
+    smap_check_policy_t old_smap_policy = v->arch.smap_check_policy;
+    bool old_guest_mode = nestedhvm_is_n2(v);
+    bool new_guest_mode = policy->nested_guest_mode;
+
+    v->arch.smap_check_policy = policy->smap_policy;
+    policy->smap_policy = old_smap_policy;
+
+    /*
+     * When 'v' is in the nested guest mode, all guest copy
+     * functions/macros which finally call paging_gva_to_gfn()
+     * transfer data to/from L2 guest. If the copy is intended for L1
+     * guest, we must first clear the nested guest flag (by setting
+     * policy->nested_guest_mode to false) before the copy and then
+     * restore the nested guest flag (by setting
+     * policy->nested_guest_mode to true) after the copy.
+     */
+    if ( unlikely(old_guest_mode != new_guest_mode) )
+    {
+        if ( new_guest_mode )
+            nestedhvm_vcpu_enter_guestmode(v);
+        else
+            nestedhvm_vcpu_exit_guestmode(v);
+        policy->nested_guest_mode = old_guest_mode;
+    }
 }
 
 #ifndef CONFIG_BIGMEM
@@ -1929,13 +1951,14 @@ static void paravirt_ctxt_switch_to(struct vcpu *v)
 bool_t update_runstate_area(struct vcpu *v)
 {
     bool_t rc;
-    smap_check_policy_t smap_policy;
+    struct guest_memory_policy policy =
+        { .smap_policy = SMAP_CHECK_ENABLED, .nested_guest_mode = false };
     void __user *guest_handle = NULL;
 
     if ( guest_handle_is_null(runstate_guest(v)) )
         return 1;
 
-    smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+    update_guest_memory_policy(v, &policy);
 
     if ( VM_ASSIST(v->domain, runstate_update_flag) )
     {
@@ -1969,7 +1992,7 @@ bool_t update_runstate_area(struct vcpu *v)
                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
     }
 
-    smap_policy_change(v, smap_policy);
+    update_guest_memory_policy(v, &policy);
 
     return rc;
 }
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 3ad2ab0..faa638b 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -991,17 +991,18 @@ bool_t update_secondary_system_time(struct vcpu *v,
                                     struct vcpu_time_info *u)
 {
     XEN_GUEST_HANDLE(vcpu_time_info_t) user_u = v->arch.time_info_guest;
-    smap_check_policy_t saved_policy;
+    struct guest_memory_policy policy =
+        { .smap_policy = SMAP_CHECK_ENABLED, .nested_guest_mode = false };
 
     if ( guest_handle_is_null(user_u) )
         return 1;
 
-    saved_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
+    update_guest_memory_policy(v, &policy);
 
     /* 1. Update userspace version. */
     if ( __copy_field_to_guest(user_u, u, version) == sizeof(u->version) )
     {
-        smap_policy_change(v, saved_policy);
+        update_guest_memory_policy(v, &policy);
         return 0;
     }
     wmb();
@@ -1012,7 +1013,7 @@ bool_t update_secondary_system_time(struct vcpu *v,
     u->version = version_update_end(u->version);
     __copy_field_to_guest(user_u, u, version);
 
-    smap_policy_change(v, saved_policy);
+    update_guest_memory_policy(v, &policy);
 
     return 1;
 }
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 2839a73..7b05c84 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -577,8 +577,14 @@ struct arch_vcpu
     } monitor;
 };
 
-smap_check_policy_t smap_policy_change(struct vcpu *v,
-                                       smap_check_policy_t new_policy);
+struct guest_memory_policy
+{
+    smap_check_policy_t smap_policy;
+    bool nested_guest_mode;
+};
+
+void update_guest_memory_policy(struct vcpu *v,
+                                struct guest_memory_policy *policy);
 
 /* Shorthands to improve code legibility. */
 #define hvm_vmx         hvm_vcpu.u.vmx
-- 
2.10.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.