[Xen-changelog] [xen stable-4.7] x86/vmx: Internal cleanup for MSR load/save infrastructure



commit fe028e615c9d4995685d48383fed5edc3054fa5a
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon May 7 11:57:00 2018 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Aug 14 17:27:58 2018 +0100

    x86/vmx: Internal cleanup for MSR load/save infrastructure
    
     * Use an arch_vmx_struct local variable to reduce later code volume.
     * Use start/total instead of msr_area/msr_count.  This is in preparation
       for more fine-grained handling with later changes.
     * Use ent/end pointers (again for preparation), and to make the
       vmx_add_msr() logic easier to follow.
     * Make the memory allocation block of vmx_add_msr() unlikely, and
       calculate virt_to_maddr() just once.
    
    No practical change to functionality.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
    (cherry picked from commit 94fda356fcdcc847662a4c9f6cc63511f25c1247)
---
 xen/arch/x86/hvm/vmx/vmcs.c | 76 +++++++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 8ddb3176c2..e924685e9d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1335,33 +1335,33 @@ static int construct_vmcs(struct vcpu *v)
 struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
-    unsigned int msr_count;
-    struct vmx_msr_entry *msr_area = NULL;
-    unsigned int i;
+    struct arch_vmx_struct *vmx = &curr->arch.hvm_vmx;
+    struct vmx_msr_entry *start = NULL;
+    unsigned int i, total;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        msr_count = curr->arch.hvm_vmx.host_msr_count;
-        msr_area = curr->arch.hvm_vmx.host_msr_area;
+        start    = vmx->host_msr_area;
+        total    = vmx->host_msr_count;
         break;
 
     case VMX_MSR_GUEST:
-        msr_count = curr->arch.hvm_vmx.msr_count;
-        msr_area = curr->arch.hvm_vmx.msr_area;
+        start    = vmx->msr_area;
+        total    = vmx->msr_count;
         break;
 
     default:
         ASSERT_UNREACHABLE();
     }
 
-    if ( msr_area == NULL )
+    if ( !start )
         return NULL;
 
-    for ( i = 0; i < msr_count; i++ )
+    for ( i = 0; i < total; i++ )
     {
-        if ( msr_area[i].index == msr )
-            return &msr_area[i];
+        if ( start[i].index == msr )
+            return &start[i];
     }
 
     return NULL;
@@ -1370,19 +1370,20 @@ struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
 int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
-    unsigned int idx, *msr_count;
-    struct vmx_msr_entry **msr_area, *msr_area_elem;
+    struct arch_vmx_struct *vmx = &curr->arch.hvm_vmx;
+    struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
+    unsigned int total;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        msr_count = &curr->arch.hvm_vmx.host_msr_count;
-        msr_area = &curr->arch.hvm_vmx.host_msr_area;
+        ptr      = &vmx->host_msr_area;
+        total    = vmx->host_msr_count;
         break;
 
     case VMX_MSR_GUEST:
-        msr_count = &curr->arch.hvm_vmx.msr_count;
-        msr_area = &curr->arch.hvm_vmx.msr_area;
+        ptr      = &vmx->msr_area;
+        total    = vmx->msr_count;
         break;
 
     default:
@@ -1390,48 +1391,55 @@ int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
         return -EINVAL;
     }
 
-    if ( *msr_area == NULL )
+    /* Allocate memory on first use. */
+    if ( unlikely(!*ptr) )
     {
-        if ( (*msr_area = alloc_xenheap_page()) == NULL )
+        paddr_t addr;
+
+        if ( (*ptr = alloc_xenheap_page()) == NULL )
             return -ENOMEM;
 
+        addr = virt_to_maddr(*ptr);
+
         switch ( type )
         {
         case VMX_MSR_HOST:
-            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, addr);
             break;
 
         case VMX_MSR_GUEST:
-            __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
-            __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            __vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
+            __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
             break;
         }
     }
 
-    for ( idx = 0; idx < *msr_count; idx++ )
-        if ( (*msr_area)[idx].index == msr )
+    start = *ptr;
+    end   = start + total;
+
+    for ( ent = start; ent < end && ent->index <= msr; ++ent )
+        if ( ent->index == msr )
             return 0;
 
-    if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+    if ( total == (PAGE_SIZE / sizeof(*ent)) )
         return -ENOSPC;
 
-    msr_area_elem = *msr_area + *msr_count;
-    msr_area_elem->index = msr;
-    msr_area_elem->mbz = 0;
+    memmove(ent + 1, ent, sizeof(*ent) * (end - ent));
 
-    ++*msr_count;
+    ent->index = msr;
+    ent->mbz = 0;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        rdmsrl(msr, msr_area_elem->data);
-        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+        rdmsrl(msr, ent->data);
+        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, ++vmx->host_msr_count);
         break;
 
     case VMX_MSR_GUEST:
-        msr_area_elem->data = 0;
-        __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
-        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
+        ent->data = 0;
+        __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_count);
+        __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);
         break;
     }
 
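For readers following along, the heart of the new vmx_add_msr() is the
ent/end scan plus the in-order memmove() insertion, which keeps the MSR
list sorted by index.  Below is a minimal standalone sketch of that
pattern, not the Xen code itself: the msr_entry type, the add_msr()
helper and the CAPACITY constant are illustrative stand-ins for
struct vmx_msr_entry, vmx_add_msr() and PAGE_SIZE / sizeof(*ent).

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct msr_entry {
        uint32_t index;
        uint32_t mbz;          /* must be zero, as in the VMX MSR area */
        uint64_t data;
    };

    #define CAPACITY 16        /* stand-in for PAGE_SIZE / sizeof(*ent) */

    static int add_msr(struct msr_entry *start, unsigned int *total,
                       uint32_t msr)
    {
        struct msr_entry *ent, *end = start + *total;

        /* The list is sorted by index: stop at the first larger entry. */
        for ( ent = start; ent < end && ent->index <= msr; ++ent )
            if ( ent->index == msr )
                return 0;              /* already present */

        if ( *total == CAPACITY )
            return -ENOSPC;

        /* Open a one-slot gap at the insertion point, then fill it in. */
        memmove(ent + 1, ent, sizeof(*ent) * (end - ent));

        ent->index = msr;
        ent->mbz = 0;
        ent->data = 0;
        ++*total;

        return 0;
    }

Terminating the scan at the first index greater than msr is what makes
the early exit correct once the list is sorted, and the memmove() of
(end - ent) entries shifts only the tail, so each insertion remains a
single O(n) pass over the page-sized array.
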
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.7
