[Xen-devel] [PATCH 2/2] x86/msr: Rename the msr policy objects
While attempting to develop the infrastructure, it became clear that the
original choice of naming was suboptimal.
Rename msr_domain_policy to just msr_policy to mirror the CPUID side of
things, and alter the 'dp' variable name convention to 'mp'. While altering
all the names, export all of the system msr_policy objects (which are already
global symbols).
Rename msr_vcpu_policy to vcpu_msrs and switch 'vp' to 'msrs' in code. Update
the arch_vcpu field name to match.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
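
For review convenience, a minimal standalone sketch of the naming convention
after this patch. This is illustrative only: the structures below are
simplified mocks of the Xen definitions in xen/include/asm-x86/msr.h (the
[sic] spelling of plaform_info matches the current tree), and main() exists
purely to demonstrate the new 'mp'/'msrs' variable conventions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_policy {                     /* was: struct msr_domain_policy */
    struct {
        uint32_t raw;
        bool cpuid_faulting;
    } plaform_info;
};

struct vcpu_msrs {                      /* was: struct msr_vcpu_policy */
    struct {
        uint32_t raw;
    } spec_ctrl;
};

int main(void)
{
    /* 'dp' becomes 'mp'; 'vp' becomes 'msrs', mirroring the diff below. */
    struct msr_policy mp = { .plaform_info.cpuid_faulting = true };
    struct vcpu_msrs msrs = { .spec_ctrl.raw = 0 };

    printf("cpuid_faulting=%d spec_ctrl=%" PRIu32 "\n",
           mp.plaform_info.cpuid_faulting, msrs.spec_ctrl.raw);

    return 0;
}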
---
xen/arch/x86/cpu/common.c | 4 +-
xen/arch/x86/domain.c | 10 ++---
xen/arch/x86/hvm/hvm.c | 4 +-
xen/arch/x86/hvm/svm/entry.S | 2 +-
xen/arch/x86/hvm/vmx/entry.S | 2 +-
xen/arch/x86/msr.c | 74 ++++++++++++++++++-------------------
xen/arch/x86/pv/emul-inv-op.c | 4 +-
xen/arch/x86/x86_64/asm-offsets.c | 4 +-
xen/arch/x86/x86_64/compat/entry.S | 2 +-
xen/arch/x86/x86_64/entry.S | 2 +-
xen/include/asm-x86/domain.h | 4 +-
xen/include/asm-x86/msr.h | 17 ++++-----
xen/include/asm-x86/spec_ctrl_asm.h | 2 +-
13 files changed, 64 insertions(+), 67 deletions(-)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index e6a5922..6570c2d 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -115,7 +115,7 @@ bool __init probe_cpuid_faulting(void)
int rc;
if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
- raw_msr_domain_policy.plaform_info.cpuid_faulting =
+ raw_msr_policy.plaform_info.cpuid_faulting =
val & MSR_PLATFORM_INFO_CPUID_FAULTING;
if (rc ||
@@ -177,7 +177,7 @@ void ctxt_switch_levelling(const struct vcpu *next)
*/
set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
(is_pv_domain(nextd) ||
- next->arch.msr->
+ next->arch.msrs->
misc_features_enables.cpuid_faulting));
return;
}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 5c10401..8b160d3 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -356,7 +356,7 @@ int vcpu_initialise(struct vcpu *v)
/* Idle domain */
v->arch.cr3 = __pa(idle_pg_table);
rc = 0;
- v->arch.msr = ZERO_BLOCK_PTR; /* Catch stray misuses */
+ v->arch.msrs = ZERO_BLOCK_PTR; /* Catch stray misuses */
}
if ( rc )
@@ -376,8 +376,8 @@ int vcpu_initialise(struct vcpu *v)
fail:
vcpu_destroy_fpu(v);
- xfree(v->arch.msr);
- v->arch.msr = NULL;
+ xfree(v->arch.msrs);
+ v->arch.msrs = NULL;
return rc;
}
@@ -389,8 +389,8 @@ void vcpu_destroy(struct vcpu *v)
vcpu_destroy_fpu(v);
- xfree(v->arch.msr);
- v->arch.msr = NULL;
+ xfree(v->arch.msrs);
+ v->arch.msrs = NULL;
if ( !is_idle_domain(v->domain) )
vpmu_destroy(v);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6a123a2..e022f5a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3350,9 +3350,9 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
bool hvm_check_cpuid_faulting(struct vcpu *v)
{
- const struct msr_vcpu_policy *vp = v->arch.msr;
+ const struct vcpu_msrs *msrs = v->arch.msrs;
- if ( !vp->misc_features_enables.cpuid_faulting )
+ if ( !msrs->misc_features_enables.cpuid_faulting )
return false;
return hvm_get_cpl(v) > 0;
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 2d540f9..7d73a69 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -64,7 +64,7 @@ __UNLIKELY_END(nsvm_hap)
mov %rsp, %rdi
call svm_vmenter_helper
- mov VCPU_arch_msr(%rbx), %rax
+ mov VCPU_arch_msrs(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
index afd552f..a7d1356 100644
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -81,7 +81,7 @@ UNLIKELY_END(realmode)
test %al, %al
jz .Lvmx_vmentry_restart
- mov VCPU_arch_msr(%rbx), %rax
+ mov VCPU_arch_msrs(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 27c4915..6cbda44 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -26,13 +26,13 @@
DEFINE_PER_CPU(uint32_t, tsc_aux);
-struct msr_domain_policy __read_mostly raw_msr_domain_policy,
- __read_mostly host_msr_domain_policy,
- __read_mostly hvm_max_msr_domain_policy,
- __read_mostly pv_max_msr_domain_policy;
+struct msr_policy __read_mostly raw_msr_policy,
+ __read_mostly host_msr_policy,
+ __read_mostly hvm_max_msr_policy,
+ __read_mostly pv_max_msr_policy;
-struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
- __read_mostly pv_max_msr_vcpu_policy;
+struct vcpu_msrs __read_mostly hvm_max_vcpu_msrs,
+ __read_mostly pv_max_vcpu_msrs;
static void __init calculate_raw_policy(void)
{
@@ -42,33 +42,33 @@ static void __init calculate_raw_policy(void)
static void __init calculate_host_policy(void)
{
- struct msr_domain_policy *dp = &host_msr_domain_policy;
+ struct msr_policy *mp = &host_msr_policy;
- *dp = raw_msr_domain_policy;
+ *mp = raw_msr_policy;
/* 0x000000ce MSR_INTEL_PLATFORM_INFO */
/* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
- dp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
+ mp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
}
static void __init calculate_hvm_max_policy(void)
{
- struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
+ struct msr_policy *mp = &hvm_max_msr_policy;
if ( !hvm_enabled )
return;
- *dp = host_msr_domain_policy;
+ *mp = host_msr_policy;
/* It's always possible to emulate CPUID faulting for HVM guests */
- dp->plaform_info.cpuid_faulting = true;
+ mp->plaform_info.cpuid_faulting = true;
}
static void __init calculate_pv_max_policy(void)
{
- struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
+ struct msr_policy *mp = &pv_max_msr_policy;
- *dp = host_msr_domain_policy;
+ *mp = host_msr_policy;
}
void __init init_guest_msr_policy(void)
@@ -81,18 +81,18 @@ void __init init_guest_msr_policy(void)
int init_domain_msr_policy(struct domain *d)
{
- struct msr_domain_policy *dp =
- xmemdup(is_pv_domain(d) ? &pv_max_msr_domain_policy
- : &hvm_max_msr_domain_policy);
+ struct msr_policy *mp =
+ xmemdup(is_pv_domain(d) ? &pv_max_msr_policy
+ : &hvm_max_msr_policy);
- if ( !dp )
+ if ( !mp )
return -ENOMEM;
/* See comment in intel_ctxt_switch_levelling() */
if ( is_control_domain(d) )
- dp->plaform_info.cpuid_faulting = false;
+ mp->plaform_info.cpuid_faulting = false;
- d->arch.msr = dp;
+ d->arch.msr = mp;
return 0;
}
@@ -100,14 +100,14 @@ int init_domain_msr_policy(struct domain *d)
int init_vcpu_msr_policy(struct vcpu *v)
{
struct domain *d = v->domain;
- struct msr_vcpu_policy *vp =
- xmemdup(is_pv_domain(d) ? &pv_max_msr_vcpu_policy
- : &hvm_max_msr_vcpu_policy);
+ struct vcpu_msrs *msrs =
+ xmemdup(is_pv_domain(d) ? &pv_max_vcpu_msrs
+ : &hvm_max_vcpu_msrs);
- if ( !vp )
+ if ( !msrs )
return -ENOMEM;
- v->arch.msr = vp;
+ v->arch.msrs = msrs;
return 0;
}
@@ -115,8 +115,8 @@ int init_vcpu_msr_policy(struct vcpu *v)
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
const struct cpuid_policy *cp = v->domain->arch.cpuid;
- const struct msr_domain_policy *dp = v->domain->arch.msr;
- const struct msr_vcpu_policy *vp = v->arch.msr;
+ const struct msr_policy *mp = v->domain->arch.msr;
+ const struct vcpu_msrs *msrs = v->arch.msrs;
switch ( msr )
{
@@ -129,11 +129,11 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
case MSR_SPEC_CTRL:
if ( !cp->feat.ibrsb )
goto gp_fault;
- *val = vp->spec_ctrl.raw;
+ *val = msrs->spec_ctrl.raw;
break;
case MSR_INTEL_PLATFORM_INFO:
- *val = dp->plaform_info.raw;
+ *val = mp->plaform_info.raw;
break;
case MSR_ARCH_CAPABILITIES:
@@ -141,7 +141,7 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
goto gp_fault;
case MSR_INTEL_MISC_FEATURES_ENABLES:
- *val = vp->misc_features_enables.raw;
+ *val = msrs->misc_features_enables.raw;
break;
default:
@@ -159,8 +159,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
const struct vcpu *curr = current;
struct domain *d = v->domain;
const struct cpuid_policy *cp = d->arch.cpuid;
- struct msr_domain_policy *dp = d->arch.msr;
- struct msr_vcpu_policy *vp = v->arch.msr;
+ const struct msr_policy *mp = d->arch.msr;
+ struct vcpu_msrs *msrs = v->arch.msrs;
switch ( msr )
{
@@ -207,7 +207,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
if ( val & rsvd )
goto gp_fault; /* Rsvd bit set? */
- vp->spec_ctrl.raw = val;
+ msrs->spec_ctrl.raw = val;
break;
case MSR_PRED_CMD:
@@ -223,19 +223,19 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
case MSR_INTEL_MISC_FEATURES_ENABLES:
{
- bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
+ bool old_cpuid_faulting = msrs->misc_features_enables.cpuid_faulting;
rsvd = ~0ull;
- if ( dp->plaform_info.cpuid_faulting )
+ if ( mp->plaform_info.cpuid_faulting )
rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
if ( val & rsvd )
goto gp_fault;
- vp->misc_features_enables.raw = val;
+ msrs->misc_features_enables.raw = val;
if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
- (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
+ (old_cpuid_faulting ^ msrs->misc_features_enables.cpuid_faulting) )
ctxt_switch_levelling(v);
break;
}
diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index be95a5f..56f5a45 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -71,7 +71,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
char sig[5], instr[2];
unsigned long eip, rc;
struct cpuid_leaf res;
- const struct msr_vcpu_policy *vp = current->arch.msr;
+ const struct vcpu_msrs *msrs = current->arch.msrs;
eip = regs->rip;
@@ -95,7 +95,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
return 0;
/* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
- if ( vp->misc_features_enables.cpuid_faulting &&
+ if ( msrs->misc_features_enables.cpuid_faulting &&
!guest_kernel_mode(current, regs) )
{
regs->rip = eip;
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 934a8aa..18077af 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -76,7 +76,7 @@ void __dummy__(void)
OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
- OFFSET(VCPU_arch_msr, struct vcpu, arch.msr);
+ OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
@@ -139,7 +139,7 @@ void __dummy__(void)
OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
BLANK();
- OFFSET(VCPUMSR_spec_ctrl_raw, struct msr_vcpu_policy, spec_ctrl.raw);
+ OFFSET(VCPUMSR_spec_ctrl_raw, struct vcpu_msrs, spec_ctrl.raw);
BLANK();
#ifdef CONFIG_PERF_COUNTERS
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index d737583..c23ad7a 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -154,7 +154,7 @@ ENTRY(compat_restore_all_guest)
or $X86_EFLAGS_IF,%r11
mov %r11d,UREGS_eflags(%rsp)
- mov VCPU_arch_msr(%rbx), %rax
+ mov VCPU_arch_msrs(%rbx), %rax
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index e5c9ef1..d6310f1 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -137,7 +137,7 @@ restore_all_guest:
ASSERT_INTERRUPTS_DISABLED
/* Stash guest SPEC_CTRL value while we can read struct vcpu. */
- mov VCPU_arch_msr(%rbx), %rdx
+ mov VCPU_arch_msrs(%rbx), %rdx
mov VCPUMSR_spec_ctrl_raw(%rdx), %r15d
/* Copy guest mappings and switch to per-CPU root page table. */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5a6332c..e2442f4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -367,7 +367,7 @@ struct arch_domain
/* CPUID and MSR policy objects. */
struct cpuid_policy *cpuid;
- struct msr_domain_policy *msr;
+ struct msr_policy *msr;
struct PITState vpit;
@@ -582,7 +582,7 @@ struct arch_vcpu
struct arch_vm_event *vm_event;
- struct msr_vcpu_policy *msr;
+ struct vcpu_msrs *msrs;
struct {
bool next_interrupt_enabled;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index d4064f4..3a2c799 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -258,7 +258,7 @@ static inline void wrmsr_tsc_aux(uint32_t val)
}
/* MSR policy object for shared per-domain MSRs */
-struct msr_domain_policy
+struct msr_policy
{
/*
* 0x000000ce - MSR_INTEL_PLATFORM_INFO
@@ -277,16 +277,13 @@ struct msr_domain_policy
} plaform_info;
};
-/* RAW msr domain policy: contains the actual values from H/W MSRs */
-extern struct msr_domain_policy raw_msr_domain_policy;
-/*
- * HOST msr domain policy: features that Xen actually decided to use,
- * a subset of RAW policy.
- */
-extern struct msr_domain_policy host_msr_domain_policy;
+extern struct msr_policy raw_msr_policy,
+ host_msr_policy,
+ hvm_max_msr_policy,
+ pv_max_msr_policy;
-/* MSR policy object for per-vCPU MSRs */
-struct msr_vcpu_policy
+/* Container object for per-vCPU MSRs */
+struct vcpu_msrs
{
/* 0x00000048 - MSR_SPEC_CTRL */
struct {
diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index edace2a..686a0f7 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -133,7 +133,7 @@
rdmsr
/* Stash the value from hardware. */
- mov VCPU_arch_msr(%rbx), %rdx
+ mov VCPU_arch_msrs(%rbx), %rdx
mov %eax, VCPUMSR_spec_ctrl_raw(%rdx)
xor %edx, %edx
--
2.1.4