[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/2] x86/VMX: introduce vmx_find_guest_msr()



Currently there can be up to 256 entries in a guest's MSR array and all
entries are stored in the order they were added.  Such a design requires
performing a linear scan of the whole array in order to find the MSR
with the required index, which can be a costly operation.

To avoid that, reuse the existing code for heap sort and binary search
and optimize existing functions which deal with guest's MSR arrays.
This way the time complexity of searching for a required MSR is reduced
from linear to logarithmic.

Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmcs.c        | 80 +++++++++++++++++++++++++++++---------
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 2 files changed, 63 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 59ef199..d04de8d 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -24,6 +24,7 @@
 #include <xen/event.h>
 #include <xen/kernel.h>
 #include <xen/keyhandler.h>
+#include <xen/sort.h>
 #include <xen/vm_event.h>
 #include <asm/current.h>
 #include <asm/cpufeature.h>
@@ -1283,19 +1284,36 @@ static int construct_vmcs(struct vcpu *v)
     return 0;
 }
 
-int vmx_read_guest_msr(u32 msr, u64 *val)
+static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
+    const u32 *msr = key;
+    const struct vmx_msr_entry *entry = elt;
+
+    if ( *msr > entry->index )
+        return 1;
+    if ( *msr < entry->index )
+        return -1;
+    return 0;
+}
+
+struct vmx_msr_entry *vmx_find_guest_msr(const u32 msr)
+{
+    const struct vcpu *curr = current;
+    const unsigned int msr_count = curr->arch.hvm_vmx.msr_count;
     const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
 
-    for ( i = 0; i < msr_count; i++ )
+    return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
+                   vmx_msr_entry_key_cmp);
+}
+
+int vmx_read_guest_msr(u32 msr, u64 *val)
+{
+    const struct vmx_msr_entry *ent;
+
+    if ( (ent = vmx_find_guest_msr(msr)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            *val = msr_area[i].data;
+            *val = ent->data;
             return 0;
-        }
     }
 
     return -ESRCH;
@@ -1303,22 +1321,37 @@ int vmx_read_guest_msr(u32 msr, u64 *val)
 
 int vmx_write_guest_msr(u32 msr, u64 val)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *ent;
 
-    for ( i = 0; i < msr_count; i++ )
+    if ( (ent = vmx_find_guest_msr(msr)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            msr_area[i].data = val;
+            ent->data = val;
             return 0;
-        }
     }
 
     return -ESRCH;
 }
 
+static void vmx_msr_entry_swap(void *a, void *b, int size)
+{
+    struct vmx_msr_entry *l = a, *r = b, tmp;
+
+    tmp = *l;
+    *l = *r;
+    *r = tmp;
+}
+
+static int vmx_msr_entry_cmp(const void *a, const void *b)
+{
+    const struct vmx_msr_entry *l = a, *r = b;
+
+    if ( l->index > r->index )
+        return 1;
+    if ( l->index < r->index )
+        return -1;
+    return 0;
+}
+
 int vmx_add_msr(u32 msr, int type)
 {
     struct vcpu *curr = current;
@@ -1351,9 +1384,17 @@ int vmx_add_msr(u32 msr, int type)
             __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
     }
 
-    for ( idx = 0; idx < *msr_count; idx++ )
-        if ( (*msr_area)[idx].index == msr )
+    if ( type == VMX_GUEST_MSR )
+    {
+        if ( vmx_find_guest_msr(msr) != NULL )
             return 0;
+    }
+    else
+    {
+        for ( idx = 0; idx < *msr_count; idx++ )
+            if ( (*msr_area)[idx].index == msr )
+                return 0;
+    }
 
     if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
         return -ENOSPC;
@@ -1369,6 +1410,9 @@ int vmx_add_msr(u32 msr, int type)
         msr_area_elem->data = 0;
         __vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
         __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
+
+        sort(*msr_area, *msr_count, sizeof(struct vmx_msr_entry),
+             vmx_msr_entry_cmp, vmx_msr_entry_swap);
     }
     else
     {
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h 
b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 6c3d7ba..d01099e 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -589,6 +589,7 @@ enum vmx_insn_errno
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
+struct vmx_msr_entry *vmx_find_guest_msr(const u32 msr);
 int vmx_read_guest_msr(u32 msr, u64 *val);
 int vmx_write_guest_msr(u32 msr, u64 val);
 int vmx_add_msr(u32 msr, int type);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.