[Xen-devel] [PATCH v1 3/5] x86: replace arch_vcpu::cpuid_faulting with msr_vcpu_policy
Since each vCPU now has struct msr_vcpu_policy, use the cpuid_faulting
bit from there in the current logic and remove arch_vcpu::cpuid_faulting.
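
For reference, the access pattern changes as sketched below. The struct
layout is an illustrative, abridged sketch of the msr_vcpu_policy object
this series introduces, not code added by this patch:

    struct msr_vcpu_policy {
        /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
        struct {
            bool cpuid_faulting:1;
        } misc_features_enables;
    };

    /* Before: v->arch.cpuid_faulting                             */
    /* After:  v->arch.msr->misc_features_enables.cpuid_faulting  */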
Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
---
xen/arch/x86/cpu/intel.c | 3 ++-
xen/arch/x86/hvm/hvm.c | 4 +++-
xen/arch/x86/hvm/vmx/vmx.c | 10 ++++++----
xen/arch/x86/pv/emul-inv-op.c | 4 +++-
xen/arch/x86/pv/emul-priv-op.c | 5 +++--
xen/include/asm-x86/domain.h | 3 ---
6 files changed, 17 insertions(+), 12 deletions(-)
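
Note on the guest-visible behaviour (unchanged by this patch): once a
guest sets the CPUID-faulting bit, CPUID executed at CPL > 0 raises #GP
instead of completing, which is what the touched code paths emulate.
A minimal, hypothetical guest-side sequence using the constants from
this patch:

    uint64_t val = 0;

    rdmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
    val |= MSR_MISC_FEATURES_CPUID_FAULTING;
    wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
    /* From here on, CPUID at CPL > 0 faults with #GP. */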
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 2e20327569..487eb06148 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -156,6 +156,7 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct domain *nextd = next ? next->domain : NULL;
const struct cpuidmasks *masks;
+ const struct msr_vcpu_policy *vp = next->arch.msr;
if (cpu_has_cpuid_faulting) {
/*
@@ -176,7 +177,7 @@ static void intel_ctxt_switch_levelling(const struct vcpu *next)
*/
set_cpuid_faulting(nextd && !is_control_domain(nextd) &&
(is_pv_domain(nextd) ||
- next->arch.cpuid_faulting));
+ vp->misc_features_enables.cpuid_faulting));
return;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6cb903def5..2ad07d52bc 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3286,7 +3286,9 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
bool hvm_check_cpuid_faulting(struct vcpu *v)
{
- if ( !v->arch.cpuid_faulting )
+ const struct msr_vcpu_policy *vp = v->arch.msr;
+
+ if ( !vp->misc_features_enables.cpuid_faulting )
return false;
return hvm_get_cpl(v) > 0;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 67fc85b201..155fba9017 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2902,7 +2902,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_INTEL_MISC_FEATURES_ENABLES:
*msr_content = 0;
- if ( current->arch.cpuid_faulting )
+ if ( current->arch.msr->misc_features_enables.cpuid_faulting )
*msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
break;
@@ -3134,15 +3134,17 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
case MSR_INTEL_MISC_FEATURES_ENABLES:
{
- bool old_cpuid_faulting = v->arch.cpuid_faulting;
+ struct msr_vcpu_policy *vp = v->arch.msr;
+ bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
if ( msr_content & ~MSR_MISC_FEATURES_CPUID_FAULTING )
goto gp_fault;
- v->arch.cpuid_faulting = msr_content & MSR_MISC_FEATURES_CPUID_FAULTING;
+ vp->misc_features_enables.cpuid_faulting =
+ msr_content & MSR_MISC_FEATURES_CPUID_FAULTING;
if ( cpu_has_cpuid_faulting &&
- (old_cpuid_faulting ^ v->arch.cpuid_faulting) )
+ (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
ctxt_switch_levelling(v);
break;
}
diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index 415d294c53..f8944170d5 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -66,6 +66,7 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
char sig[5], instr[2];
unsigned long eip, rc;
struct cpuid_leaf res;
+ const struct msr_vcpu_policy *vp = current->arch.msr;
eip = regs->rip;
@@ -89,7 +90,8 @@ static int emulate_forced_invalid_op(struct cpu_user_regs *regs)
return 0;
/* If cpuid faulting is enabled and CPL>0 inject a #GP in place of #UD. */
- if ( current->arch.cpuid_faulting && !guest_kernel_mode(current, regs) )
+ if ( vp->misc_features_enables.cpuid_faulting &&
+ !guest_kernel_mode(current, regs) )
{
regs->rip = eip;
pv_inject_hw_exception(TRAP_gp_fault, regs->error_code);
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index d50f51944f..66cda538fc 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -948,7 +948,7 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
break;
*val = 0;
- if ( curr->arch.cpuid_faulting )
+ if ( curr->arch.msr->misc_features_enables.cpuid_faulting )
*val |= MSR_MISC_FEATURES_CPUID_FAULTING;
return X86EMUL_OKAY;
@@ -1154,7 +1154,8 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
!this_cpu(cpuid_faulting_enabled) )
break;
- curr->arch.cpuid_faulting = !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
+ curr->arch.msr->misc_features_enables.cpuid_faulting =
+ !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
return X86EMUL_OKAY;
case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 866a03b508..60c02650d5 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -555,9 +555,6 @@ struct arch_vcpu
* and thus should be saved/restored. */
bool_t nonlazy_xstate_used;
- /* Has the guest enabled CPUID faulting? */
- bool cpuid_faulting;
-
/*
* The SMAP check policy when updating runstate_guest(v) and the
* secondary system time.
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel