[Xen-devel] [PATCH v3 2/3] x86/vmx: optimize vmx_read/write_guest_msr()
Replace the linear scan with vmx_find_msr(). This reduces the time
complexity of searching for the required MSR from linear to logarithmic.
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2 --> v3:
- no changes
xen/arch/x86/hvm/vmx/vmcs.c | 26 ++++++++------------------
1 file changed, 8 insertions(+), 18 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 977106f..0dcb938 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1360,17 +1360,12 @@ struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
 
 int vmx_read_guest_msr(u32 msr, u64 *val)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *ent;
 
-    for ( i = 0; i < msr_count; i++ )
+    if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            *val = msr_area[i].data;
-            return 0;
-        }
+        *val = ent->data;
+        return 0;
     }
 
     return -ESRCH;
@@ -1378,17 +1373,12 @@ int vmx_read_guest_msr(u32 msr, u64 *val)
 
 int vmx_write_guest_msr(u32 msr, u64 val)
 {
-    struct vcpu *curr = current;
-    unsigned int i, msr_count = curr->arch.hvm_vmx.msr_count;
-    struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *ent;
 
-    for ( i = 0; i < msr_count; i++ )
+    if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
     {
-        if ( msr_area[i].index == msr )
-        {
-            msr_area[i].data = val;
-            return 0;
-        }
+        ent->data = val;
+        return 0;
     }
 
     return -ESRCH;
--
2.9.3
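
[Editor's note] Since vmx_find_msr() itself is not part of this patch (it already
exists at the hunk's anchor point, presumably from earlier in the series), here
is a minimal, self-contained sketch illustrating how a binary search over an MSR
area kept sorted by MSR index yields the logarithmic lookup the commit message
refers to. This is an illustration, not the actual Xen code: the struct layout
mirrors the vmx_msr_entry fields used in the diff above, while the helper name
find_msr_sorted() and the standalone includes are assumptions made for the
example.

#include <stddef.h>
#include <stdint.h>

/* Mirrors the VMX MSR load/store area entry format used in the diff above. */
struct vmx_msr_entry {
    uint32_t index;     /* MSR number */
    uint32_t mbz;       /* reserved, must be zero */
    uint64_t data;      /* MSR value */
};

/*
 * Illustrative binary search: O(log count) comparisons, assuming the
 * area is kept sorted by ascending MSR index when entries are added.
 */
static struct vmx_msr_entry *
find_msr_sorted(struct vmx_msr_entry *area, unsigned int count, uint32_t msr)
{
    unsigned int lo = 0, hi = count;

    while ( lo < hi )
    {
        unsigned int mid = lo + (hi - lo) / 2;

        if ( area[mid].index == msr )
            return &area[mid];

        if ( area[mid].index < msr )
            lo = mid + 1;
        else
            hi = mid;
    }

    return NULL;    /* not found; callers map this to -ESRCH */
}

On top of such a helper, a reader/writer collapses to the shape seen in the
patch: look the entry up once, then either copy ent->data out or assign the new
value, returning -ESRCH when the MSR is not present in the area.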