|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2 1/9] x86/vmx: API improvements for MSR load/save infrastructure
Collect together related infrastructure in vmcs.h, rather than having it
spread out. Turn vmx_{read,write}_guest_msr() into static inlines, as they
are simple enough.
Replace 'int type' with 'enum vmx_msr_list_type', and use switch statements
internally. Later changes are going to introduce a new type.
Rename the type identifiers for consistency with the other VMX_MSR_*
constants.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
v2:
* Extra const
* Document the behaviour of the MSR list types
---
xen/arch/x86/hvm/vmx/vmcs.c | 93 +++++++++++++++++---------------------
xen/arch/x86/hvm/vmx/vmx.c | 8 ++--
xen/include/asm-x86/hvm/vmx/vmcs.h | 62 ++++++++++++++++++-------
3 files changed, 91 insertions(+), 72 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 884c672..7f9dd48 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1283,22 +1283,26 @@ static int vmx_msr_entry_key_cmp(const void *key, const
void *elt)
return 0;
}
-struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
{
struct vcpu *curr = current;
unsigned int msr_count;
- struct vmx_msr_entry *msr_area;
+ struct vmx_msr_entry *msr_area = NULL;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
- msr_count = curr->arch.hvm_vmx.msr_count;
- msr_area = curr->arch.hvm_vmx.msr_area;
- }
- else
- {
- ASSERT(type == VMX_HOST_MSR);
+ case VMX_MSR_HOST:
msr_count = curr->arch.hvm_vmx.host_msr_count;
msr_area = curr->arch.hvm_vmx.host_msr_area;
+ break;
+
+ case VMX_MSR_GUEST:
+ msr_count = curr->arch.hvm_vmx.msr_count;
+ msr_area = curr->arch.hvm_vmx.msr_area;
+ break;
+
+ default:
+ ASSERT_UNREACHABLE();
}
if ( msr_area == NULL )
@@ -1308,48 +1312,27 @@ struct vmx_msr_entry *vmx_find_msr(u32 msr, int type)
vmx_msr_entry_key_cmp);
}
-int vmx_read_guest_msr(u32 msr, u64 *val)
-{
- struct vmx_msr_entry *ent;
-
- if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
- {
- *val = ent->data;
- return 0;
- }
-
- return -ESRCH;
-}
-
-int vmx_write_guest_msr(u32 msr, u64 val)
-{
- struct vmx_msr_entry *ent;
-
- if ( (ent = vmx_find_msr(msr, VMX_GUEST_MSR)) != NULL )
- {
- ent->data = val;
- return 0;
- }
-
- return -ESRCH;
-}
-
-int vmx_add_msr(u32 msr, int type)
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
{
struct vcpu *curr = current;
unsigned int idx, *msr_count;
struct vmx_msr_entry **msr_area, *msr_area_elem;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
- msr_count = &curr->arch.hvm_vmx.msr_count;
- msr_area = &curr->arch.hvm_vmx.msr_area;
- }
- else
- {
- ASSERT(type == VMX_HOST_MSR);
+ case VMX_MSR_HOST:
msr_count = &curr->arch.hvm_vmx.host_msr_count;
msr_area = &curr->arch.hvm_vmx.host_msr_area;
+ break;
+
+ case VMX_MSR_GUEST:
+ msr_count = &curr->arch.hvm_vmx.msr_count;
+ msr_area = &curr->arch.hvm_vmx.msr_area;
+ break;
+
+ default:
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
}
if ( *msr_area == NULL )
@@ -1357,13 +1340,17 @@ int vmx_add_msr(u32 msr, int type)
if ( (*msr_area = alloc_xenheap_page()) == NULL )
return -ENOMEM;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
+ case VMX_MSR_HOST:
+ __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ break;
+
+ case VMX_MSR_GUEST:
__vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+ break;
}
- else
- __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
}
for ( idx = 0; idx < *msr_count && (*msr_area)[idx].index <= msr; idx++ )
@@ -1382,16 +1369,18 @@ int vmx_add_msr(u32 msr, int type)
++*msr_count;
- if ( type == VMX_GUEST_MSR )
+ switch ( type )
{
+ case VMX_MSR_HOST:
+ rdmsrl(msr, msr_area_elem->data);
+ __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ break;
+
+ case VMX_MSR_GUEST:
msr_area_elem->data = 0;
__vmwrite(VM_EXIT_MSR_STORE_COUNT, *msr_count);
__vmwrite(VM_ENTRY_MSR_LOAD_COUNT, *msr_count);
- }
- else
- {
- rdmsrl(msr, msr_area_elem->data);
- __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+ break;
}
return 0;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 4318b15..bfabcab 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4160,7 +4160,7 @@ static void lbr_tsx_fixup(void)
struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
struct vmx_msr_entry *msr;
- if ( (msr = vmx_find_msr(lbr_from_start, VMX_GUEST_MSR)) != NULL )
+ if ( (msr = vmx_find_msr(lbr_from_start, VMX_MSR_GUEST)) != NULL )
{
/*
* Sign extend into bits 61:62 while preserving bit 63
@@ -4170,7 +4170,7 @@ static void lbr_tsx_fixup(void)
msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
}
- if ( (msr = vmx_find_msr(lbr_lastint_from, VMX_GUEST_MSR)) != NULL )
+ if ( (msr = vmx_find_msr(lbr_lastint_from, VMX_MSR_GUEST)) != NULL )
msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
}
@@ -4193,8 +4193,8 @@ static void bdw_erratum_bdf14_fixup(void)
* erratum BDF14. Fix up MSR_IA32_LASTINT{FROM,TO}IP by
* sign-extending into bits 48:63.
*/
- sign_extend_msr(MSR_IA32_LASTINTFROMIP, VMX_GUEST_MSR);
- sign_extend_msr(MSR_IA32_LASTINTTOIP, VMX_GUEST_MSR);
+ sign_extend_msr(MSR_IA32_LASTINTFROMIP, VMX_MSR_GUEST);
+ sign_extend_msr(MSR_IA32_LASTINTTOIP, VMX_MSR_GUEST);
}
static void lbr_fixup(void)
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h
b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 06c3179..20882d1 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -514,9 +514,6 @@ enum vmcs_field {
#define VMCS_VPID_WIDTH 16
-#define VMX_GUEST_MSR 0
-#define VMX_HOST_MSR 1
-
/* VM Instruction error numbers */
enum vmx_insn_errno
{
@@ -534,6 +531,52 @@ enum vmx_insn_errno
VMX_INSN_FAIL_INVALID = ~0,
};
+/* MSR load/save list infrastructure. */
+enum vmx_msr_list_type {
+ VMX_MSR_HOST, /* MSRs loaded on VMExit. */
+ VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. */
+};
+
+int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_add_host_load_msr(uint32_t msr)
+{
+ return vmx_add_msr(msr, VMX_MSR_HOST);
+}
+
+static inline int vmx_add_guest_msr(uint32_t msr)
+{
+ return vmx_add_msr(msr, VMX_MSR_GUEST);
+}
+
+struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type);
+
+static inline int vmx_read_guest_msr(uint32_t msr, uint64_t *val)
+{
+ const struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+ if ( !ent )
+ return -ESRCH;
+
+ *val = ent->data;
+
+ return 0;
+}
+
+static inline int vmx_write_guest_msr(uint32_t msr, uint64_t val)
+{
+ struct vmx_msr_entry *ent = vmx_find_msr(msr, VMX_MSR_GUEST);
+
+ if ( !ent )
+ return -ESRCH;
+
+ ent->data = val;
+
+ return 0;
+}
+
+
+/* MSR intercept bitmap infrastructure. */
enum vmx_msr_intercept_type {
VMX_MSR_R = 1,
VMX_MSR_W = 2,
@@ -544,10 +587,6 @@ void vmx_clear_msr_intercept(struct vcpu *v, unsigned int
msr,
enum vmx_msr_intercept_type type);
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
enum vmx_msr_intercept_type type);
-int vmx_read_guest_msr(u32 msr, u64 *val);
-int vmx_write_guest_msr(u32 msr, u64 val);
-struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
-int vmx_add_msr(u32 msr, int type);
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
@@ -562,15 +601,6 @@ void virtual_vmcs_vmwrite(const struct vcpu *, u32
encoding, u64 val);
enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
u32 vmcs_encoding, u64 val);
-static inline int vmx_add_guest_msr(u32 msr)
-{
- return vmx_add_msr(msr, VMX_GUEST_MSR);
-}
-static inline int vmx_add_host_load_msr(u32 msr)
-{
- return vmx_add_msr(msr, VMX_HOST_MSR);
-}
-
DECLARE_PER_CPU(bool_t, vmxon);
bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.