
[Xen-changelog] [xen stable-4.6] x86/vmx: Support load-only guest MSR list entries



commit a80ce1703fc766190c0d348c788d088410ca074b
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon May 7 11:57:00 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 14 17:39:56 2018 +0100

    x86/vmx: Support load-only guest MSR list entries
    
    Currently, the VMX_MSR_GUEST type maintains completely symmetric guest
    load and save lists, by pointing VM_EXIT_MSR_STORE_ADDR and
    VM_ENTRY_MSR_LOAD_ADDR at the same page, and setting
    VM_EXIT_MSR_STORE_COUNT and VM_ENTRY_MSR_LOAD_COUNT to the same value.
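    
    For reference, the pre-patch symmetric setup amounts to the following
    condensed sketch of the existing vmcs.c code (not a verbatim quote):
    
        /* Sketch: both lists alias one page, with a single shared count. */
        __vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
        __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
        __vmwrite(VM_EXIT_MSR_STORE_COUNT, vmx->msr_count);
        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);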
    
    However, for MSRs which we won't let the guest have direct access to, having
    hardware save the current value on VMExit is unnecessary overhead.
    
    To avoid this overhead, we must make the load and save lists asymmetric.
    By making the entry load count greater than the exit store count, we can
    maintain two adjacent lists of MSRs, the first of which is saved and
    restored, and the second of which is only restored on VMEntry.
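    
    Schematically, the shared msr_area page then looks like:
    
        0                       msr_save_count          msr_load_count
        |  saved and loaded     |  load-only            |  (unused)
        |  sublist              |  sublist              |
        VM_EXIT_MSR_STORE_COUNT = msr_save_count  (first sublist only)
        VM_ENTRY_MSR_LOAD_COUNT = msr_load_count  (both sublists)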
    
    For simplicity:
     * Both adjacent lists are still sorted by MSR index.
     * It is undefined behaviour to insert the same MSR into both lists.
     * The total size of both lists is still limited to 256 entries (one 4k
       page); see the sizing note below.
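    
    Sizing note: each struct vmx_msr_entry is 16 bytes (32-bit index, 32-bit
    reserved field, 64-bit data), so one 4k page holds exactly 4096 / 16 =
    256 entries.  Illustratively (this BUILD_BUG_ON() is not part of the
    patch):
    
        BUILD_BUG_ON(sizeof(struct vmx_msr_entry) != 16);  /* 4096/16 = 256 */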
    
    Split the current msr_count field into msr_{load,save}_count, introduce
    a new VMX_MSR_GUEST_LOADONLY type, and update vmx_{add,find}_msr() to
    calculate which sublist to search, based on type.  VMX_MSR_HOST has no
    logical sublist, whereas VMX_MSR_GUEST has a sublist between 0 and the
    save count, while VMX_MSR_GUEST_LOADONLY has a sublist between the save
    count and the load count.
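    
    Condensed from the hunks below, the per-type sublist bounds work out as:
    
        case VMX_MSR_HOST:            substart = 0;
                                      subend   = vmx->host_msr_count; break;
        case VMX_MSR_GUEST:           substart = 0;
                                      subend   = vmx->msr_save_count; break;
        case VMX_MSR_GUEST_LOADONLY:  substart = vmx->msr_save_count;
                                      subend   = vmx->msr_load_count; break;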
    
    One subtle point is that inserting an MSR into the load-save list involves
    moving the entire load-only list, and updating both counts.
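    
    A minimal sketch of that insertion path (the real vmx_add_msr() below
    also handles list allocation and the VMX_MSR_HOST case):
    
        /* Shift everything from the insertion point upwards, covering both
         * sublists, then bump both VMCS counts. */
        memmove(ent + 1, ent, sizeof(*ent) * (end - ent));
        ent->index = msr;
        ent->data  = 0;
        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);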
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    (cherry picked from commit 1ac46b55632626aeb935726e1b0a71605ef6763a)
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 46 +++++++++++++++++++++++++++++---------
 xen/include/asm-x86/hvm/vmx/vmcs.h | 13 ++++++++++-
 2 files changed, 48 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 327bc30a85..dc0e6a43c5 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1317,7 +1317,7 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
 {
     const struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
     struct vmx_msr_entry *start = NULL, *ent, *end;
-    unsigned int total;
+    unsigned int substart, subend, total;
 
     ASSERT(v == current || !vcpu_runnable(v));
 
@@ -1325,12 +1325,23 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
     {
     case VMX_MSR_HOST:
         start    = vmx->host_msr_area;
-        total    = vmx->host_msr_count;
+        substart = 0;
+        subend   = vmx->host_msr_count;
+        total    = subend;
         break;
 
     case VMX_MSR_GUEST:
         start    = vmx->msr_area;
-        total    = vmx->msr_count;
+        substart = 0;
+        subend   = vmx->msr_save_count;
+        total    = vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        start    = vmx->msr_area;
+        substart = vmx->msr_save_count;
+        subend   = vmx->msr_load_count;
+        total    = subend;
         break;
 
     default:
@@ -1341,7 +1352,7 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
         return NULL;
 
     end = start + total;
-    ent = locate_msr_entry(start, end, msr);
+    ent = locate_msr_entry(start + substart, start + subend, msr);
 
     return ((ent < end) && (ent->index == msr)) ? ent : NULL;
 }
@@ -1350,7 +1361,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
 {
     struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
     struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
-    unsigned int total;
+    unsigned int substart, subend, total;
     int rc;
 
     ASSERT(v == current || !vcpu_runnable(v));
@@ -1359,12 +1370,23 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
     {
     case VMX_MSR_HOST:
         ptr      = &vmx->host_msr_area;
-        total    = vmx->host_msr_count;
+        substart = 0;
+        subend   = vmx->host_msr_count;
+        total    = subend;
         break;
 
     case VMX_MSR_GUEST:
         ptr      = &vmx->msr_area;
-        total    = vmx->msr_count;
+        substart = 0;
+        subend   = vmx->msr_save_count;
+        total    = vmx->msr_load_count;
+        break;
+
+    case VMX_MSR_GUEST_LOADONLY:
+        ptr      = &vmx->msr_area;
+        substart = vmx->msr_save_count;
+        subend   = vmx->msr_load_count;
+        total    = subend;
         break;
 
     default:
@@ -1394,6 +1416,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
             break;
 
         case VMX_MSR_GUEST:
+        case VMX_MSR_GUEST_LOADONLY:
             __vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
             __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
             break;
@@ -1402,7 +1425,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
 
     start = *ptr;
     end   = start + total;
-    ent   = locate_msr_entry(start, end, msr);
+    ent   = locate_msr_entry(start + substart, start + subend, msr);
 
     if ( (ent < end) && (ent->index == msr) )
     {
@@ -1429,9 +1452,12 @@ int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
         break;
 
     case VMX_MSR_GUEST:
+        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
+
+        /* Fallthrough */
+    case VMX_MSR_GUEST_LOADONLY:
         ent->data = 0;
-        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_count);
-        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);
+        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);
         break;
     }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b12bc1efc6..8aefe89f20 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -128,7 +128,8 @@ struct arch_vmx_struct {
      */
     struct vmx_msr_entry *msr_area;
     struct vmx_msr_entry *host_msr_area;
-    unsigned int         msr_count;
+    unsigned int         msr_load_count;
+    unsigned int         msr_save_count;
     unsigned int         host_msr_count;
 
     unsigned long        eoi_exitmap_changed;
@@ -500,8 +501,18 @@ extern const unsigned int vmx_introspection_force_enabled_msrs_size;
 enum vmx_msr_list_type {
     VMX_MSR_HOST,           /* MSRs loaded on VMExit.                   */
     VMX_MSR_GUEST,          /* MSRs saved on VMExit, loaded on VMEntry. */
+    VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only.             */
 };
 
+/**
+ * Add an MSR to an MSR list.  No-op if the MSR already exists.
+ *
+ * It is undefined behaviour to try and insert the same MSR into both the
+ * GUEST and GUEST_LOADONLY list.
+ *
+ * May fail if unable to allocate memory for the list, or the total number of
+ * entries exceeds the memory allocated.
+ */
 int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);
 
 static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr)
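
For reference, a hedged caller-side sketch of the new list type (the vcpu
pointer and MSR variable here are illustrative, not part of this patch):

    /* Load-only: hardware restores the value on VMEntry, without wasting
     * a VMExit save for an MSR the guest can't access directly. */
    int rc = vmx_add_msr(v, msr, VMX_MSR_GUEST_LOADONLY);

    if ( rc )
        /* Failed: out of memory, or the 256-entry list limit was hit. */;
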
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.6
