[Xen-devel] [PATCH v2 1/5] x86/hvm: Handle viridian MSRs via the new guest_{rd,wr}msr() infrastructure
Dispatch from the guest_{rd,wr}msr() functions, after confirming that the
domain is configured to use viridian.  This allows for simplification of the
viridian helpers, as they don't need to cope with the "not a viridian MSR"
case.  It also means that viridian MSRs which are unimplemented, or excluded
because of features, don't fall back into the default handling path.

Rename {rd,wr}msr_viridian_regs() to guest_{rd,wr}msr_viridian() for
consistency, and because the _regs suffix isn't very appropriate.

Update them to take a vcpu pointer rather than presuming that they act on
current, which is safe for all implemented operations.  Also update them to
use X86EMUL_* return values.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>

v2:
 * Introduce some constants for the hypervisor range.  However, I do not
   think the current code is an improvement over bare numbers.
---
 xen/arch/x86/hvm/svm/svm.c         |  6 +----
 xen/arch/x86/hvm/viridian.c        | 52 +++++++++++++++++---------------------
 xen/arch/x86/hvm/vmx/vmx.c         |  6 +----
 xen/arch/x86/msr.c                 | 41 +++++++++++++++++++++++++++---
 xen/include/asm-x86/hvm/viridian.h | 11 ++------
 xen/include/asm-x86/msr-index.h    |  4 +++
 6 files changed, 68 insertions(+), 52 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c34f5b5..e8ac2d8 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1966,8 +1966,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         else if ( ret )
             break;
 
-        if ( rdmsr_viridian_regs(msr, msr_content) ||
-             rdmsr_hypervisor_regs(msr, msr_content) )
+        if ( rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
         if ( rdmsr_safe(msr, *msr_content) == 0 )
@@ -2122,9 +2121,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         else if ( ret )
             break;
 
-        if ( wrmsr_viridian_regs(msr, msr_content) )
-            break;
-
         switch ( wrmsr_hypervisor_regs(msr, msr_content) )
         {
         case -ERESTART:
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 70aab52..074d920 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -88,9 +88,6 @@
 #define HV_X64_MSR_CRASH_P4              0x40000104
 #define HV_X64_MSR_CRASH_CTL             0x40000105
 
-#define VIRIDIAN_MSR_MIN HV_X64_MSR_GUEST_OS_ID
-#define VIRIDIAN_MSR_MAX HV_X64_MSR_CRASH_CTL
-
 /* Viridian Hypercall Status Codes. */
 #define HV_STATUS_SUCCESS                       0x0000
 #define HV_STATUS_INVALID_HYPERCALL_CODE        0x0002
@@ -554,13 +551,11 @@ static void update_reference_tsc(struct domain *d, bool_t initialize)
     put_page_and_type(page);
 }
 
-int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
+int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
 {
-    struct vcpu *v = current;
     struct domain *d = v->domain;
 
-    if ( !is_viridian_domain(d) )
-        return 0;
+    ASSERT(is_viridian_domain(d));
 
     switch ( idx )
     {
@@ -615,7 +610,7 @@ int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
 
     case HV_X64_MSR_REFERENCE_TSC:
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
-            return 0;
+            goto gp_fault;
 
         perfc_incr(mshv_wrmsr_tsc_msr);
         d->arch.hvm_domain.viridian.reference_tsc.raw = val;
@@ -655,14 +650,15 @@
     }
 
     default:
-        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
-            gprintk(XENLOG_WARNING, "write to unimplemented MSR %#x\n",
-                    idx);
-
-        return 0;
+        gdprintk(XENLOG_WARNING,
+                 "Write %016"PRIx64" to unimplemented MSR %#x\n", val, idx);
+        goto gp_fault;
     }
 
-    return 1;
+    return X86EMUL_OKAY;
+
+ gp_fault:
+    return X86EMUL_EXCEPTION;
 }
 
 static int64_t raw_trc_val(struct domain *d)
@@ -698,13 +694,11 @@ void viridian_time_ref_count_thaw(struct domain *d)
     trc->off = (int64_t)trc->val - raw_trc_val(d);
 }
 
-int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
+int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
-    struct vcpu *v = current;
     struct domain *d = v->domain;
-
-    if ( !is_viridian_domain(d) )
-        return 0;
+
+    ASSERT(is_viridian_domain(d));
 
     switch ( idx )
     {
@@ -725,7 +719,7 @@
 
     case HV_X64_MSR_TSC_FREQUENCY:
         if ( viridian_feature_mask(d) & HVMPV_no_freq )
-            return 0;
+            goto gp_fault;
 
         perfc_incr(mshv_rdmsr_tsc_frequency);
         *val = (uint64_t)d->arch.tsc_khz * 1000ull;
@@ -733,7 +727,7 @@
 
     case HV_X64_MSR_APIC_FREQUENCY:
         if ( viridian_feature_mask(d) & HVMPV_no_freq )
-            return 0;
+            goto gp_fault;
 
         perfc_incr(mshv_rdmsr_apic_frequency);
         *val = 1000000000ull / APIC_BUS_CYCLE_NS;
@@ -757,7 +751,7 @@
 
     case HV_X64_MSR_REFERENCE_TSC:
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
-            return 0;
+            goto gp_fault;
 
         perfc_incr(mshv_rdmsr_tsc_msr);
         *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
@@ -770,7 +764,7 @@
         trc = &d->arch.hvm_domain.viridian.time_ref_count;
 
         if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
-            return 0;
+            goto gp_fault;
 
         if ( !test_and_set_bit(_TRC_accessed, &trc->flags) )
             printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
@@ -804,14 +798,14 @@
     }
 
     default:
-        if ( idx >= VIRIDIAN_MSR_MIN && idx <= VIRIDIAN_MSR_MAX )
-            gprintk(XENLOG_WARNING, "read from unimplemented MSR %#x\n",
-                    idx);
-
-        return 0;
+        gdprintk(XENLOG_WARNING, "Read from unimplemented MSR %#x\n", idx);
+        goto gp_fault;
     }
 
-    return 1;
+    return X86EMUL_OKAY;
+
+ gp_fault:
+    return X86EMUL_EXCEPTION;
 }
 
 void viridian_vcpu_deinit(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 18d8ce2..5fc21a5 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2909,8 +2909,7 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
             break;
         }
 
-        if ( rdmsr_viridian_regs(msr, msr_content) ||
-             rdmsr_hypervisor_regs(msr, msr_content) )
+        if ( rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
         if ( rdmsr_safe(msr, *msr_content) == 0 )
@@ -3150,9 +3149,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         if ( passive_domain_do_wrmsr(msr, msr_content) )
            return X86EMUL_OKAY;
 
-        if ( wrmsr_viridian_regs(msr, msr_content) )
-            break;
-
         if ( vmx_write_guest_msr(msr, msr_content) == 0 ||
              is_last_branch_msr(msr) )
             break;
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 8ae3b4e..2fc2021 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -141,9 +141,11 @@ int init_vcpu_msr_policy(struct vcpu *v)
 
 int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
 {
-    const struct cpuid_policy *cp = v->domain->arch.cpuid;
-    const struct msr_domain_policy *dp = v->domain->arch.msr;
+    const struct domain *d = v->domain;
+    const struct cpuid_policy *cp = d->arch.cpuid;
+    const struct msr_domain_policy *dp = d->arch.msr;
     const struct msr_vcpu_policy *vp = v->arch.msr;
+    int ret = X86EMUL_OKAY;
 
     switch ( msr )
     {
@@ -175,11 +177,26 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
                _MSR_MISC_FEATURES_CPUID_FAULTING;
         break;
 
+    case MSR_HYPERVISOR_START ... MSR_HYPERVISOR_START + NR_VIRIDIAN_MSRS - 1:
+        if ( is_viridian_domain(d) )
+        {
+            ret = guest_rdmsr_viridian(v, msr, val);
+            goto out;
+        }
+
+        /* Fallthrough. */
     default:
         return X86EMUL_UNHANDLEABLE;
     }
 
-    return X86EMUL_OKAY;
+ out:
+    /*
+     * Check that functions we dispatch to don't end up returning
+     * X86EMUL_UNHANDLEABLE, as that interferes with the transitionary period
+     * meaning of "fall back to the legacy MSR handlers".
+     */
+    ASSERT(ret != X86EMUL_UNHANDLEABLE);
+    return ret;
 
  gp_fault:
     return X86EMUL_EXCEPTION;
@@ -192,6 +209,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
     const struct cpuid_policy *cp = d->arch.cpuid;
     struct msr_domain_policy *dp = d->arch.msr;
     struct msr_vcpu_policy *vp = v->arch.msr;
+    int ret = X86EMUL_OKAY;
 
     switch ( msr )
     {
@@ -250,11 +268,26 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         break;
     }
 
+    case MSR_HYPERVISOR_START ... MSR_HYPERVISOR_START + NR_VIRIDIAN_MSRS - 1:
+        if ( is_viridian_domain(d) )
+        {
+            ret = guest_wrmsr_viridian(v, msr, val);
+            goto out;
+        }
+
+        /* Fallthrough. */
     default:
         return X86EMUL_UNHANDLEABLE;
     }
 
-    return X86EMUL_OKAY;
+ out:
+    /*
+     * Check that functions we dispatch to don't end up returning
+     * X86EMUL_UNHANDLEABLE, as that interferes with the transitionary period
+     * meaning of "fall back to the legacy MSR handlers".
+     */
+    ASSERT(ret != X86EMUL_UNHANDLEABLE);
+    return ret;
 
  gp_fault:
     return X86EMUL_EXCEPTION;
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index 4cbd133..071fb44 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -101,15 +101,8 @@ struct viridian_domain
 
 void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
                            uint32_t subleaf, struct cpuid_leaf *res);
 
-int
-wrmsr_viridian_regs(
-    uint32_t idx,
-    uint64_t val);
-
-int
-rdmsr_viridian_regs(
-    uint32_t idx,
-    uint64_t *val);
+int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val);
+int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val);
 
 int viridian_hypercall(struct cpu_user_regs *regs);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 23ad743..ee029a2 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -540,4 +540,8 @@
 #define MSR_PKGC9_IRTL			0x00000634
 #define MSR_PKGC10_IRTL			0x00000635
 
+/* Hypervisor leaves in the 0x4xxxxxxx range. */
+#define MSR_HYPERVISOR_START		0x40000000
+#define NR_VIRIDIAN_MSRS		0x00000200
+
 #endif /* __ASM_MSR_INDEX_H */
-- 
2.1.4
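For reference, a condensed sketch of the resulting dispatch shape, assembled
from the msr.c hunks above (the guest_wrmsr() side is symmetrical, and the
surrounding architectural cases are elided; this is an illustration, not the
applied code):

    int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
    {
        const struct domain *d = v->domain;
        int ret = X86EMUL_OKAY;

        switch ( msr )
        {
        /* ... architectural MSR cases elided ... */

        case MSR_HYPERVISOR_START ... MSR_HYPERVISOR_START + NR_VIRIDIAN_MSRS - 1:
            if ( is_viridian_domain(d) )
            {
                /* Only ever returns X86EMUL_OKAY or X86EMUL_EXCEPTION now. */
                ret = guest_rdmsr_viridian(v, msr, val);
                goto out;
            }

            /* Non-viridian domains fall through to legacy handling. */
        default:
            return X86EMUL_UNHANDLEABLE;
        }

     out:
        /* UNHANDLEABLE still means "try the legacy handlers" to callers. */
        ASSERT(ret != X86EMUL_UNHANDLEABLE);
        return ret;
    }

The case range uses the GCC/Clang range extension already used elsewhere in
Xen.  The net effect is that, for viridian domains, unimplemented or
feature-gated MSRs in the hypervisor range now raise #GP via
X86EMUL_EXCEPTION instead of dropping into the default handling path.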