[Xen-changelog] [xen-unstable] x86/vMCE: Add AMD support
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1348654062 -7200
# Node ID 5d63c633a60b9a1d695594f9c17cf933240bec81
# Parent 07d0d5b3a0054d2534f09bcf90437678df2bfc54
x86/vMCE: Add AMD support

Add vMCE support for AMD. Add vmce namespace to Intel specific vMCE MSR
functions. Move vMCE prototypes from mce.h to vmce.h.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>

- fix inverted return values from vmce_amd_{rd,wr}msr()
- remove bogus printk()-s from those functions

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---

diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/amd_f10.c
--- a/xen/arch/x86/cpu/mcheck/amd_f10.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/amd_f10.c Wed Sep 26 12:07:42 2012 +0200
@@ -104,3 +104,28 @@ enum mcheck_type amd_f10_mcheck_init(str
 
     return mcheck_amd_famXX;
 }
+
+/* amd specific MCA MSR */
+int vmce_amd_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+{
+    switch (msr) {
+    case MSR_F10_MC4_MISC1:
+    case MSR_F10_MC4_MISC2:
+    case MSR_F10_MC4_MISC3:
+        break;
+    }
+
+    return 1;
+}
+
+int vmce_amd_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+{
+    switch (msr) {
+    case MSR_F10_MC4_MISC1:
+    case MSR_F10_MC4_MISC2:
+    case MSR_F10_MC4_MISC3:
+        break;
+    }
+
+    return 1;
+}
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/amd_nonfatal.c
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Wed Sep 26 12:07:42 2012 +0200
@@ -64,6 +64,7 @@
 #include <asm/msr.h>
 
 #include "mce.h"
+#include "vmce.h"
 
 static struct timer mce_timer;
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.c Wed Sep 26 12:07:42 2012 +0200
@@ -25,6 +25,7 @@
 #include "mce.h"
 #include "barrier.h"
 #include "util.h"
+#include "vmce.h"
 
 bool_t __read_mostly mce_disabled;
 invbool_param("mce", mce_disabled);
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.h Wed Sep 26 12:07:42 2012 +0200
@@ -49,15 +49,9 @@ void intel_mcheck_timer(struct cpuinfo_x
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
 void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
 
-int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
-int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
-
-u64 mce_cap_init(void);
+uint64_t mce_cap_init(void);
 extern unsigned int firstbank;
 
-int intel_mce_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
-int intel_mce_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
-
 struct mcinfo_extended *intel_get_extended_msrs(
     struct mcinfo_global *mig, struct mc_info *mi);
 
@@ -69,9 +63,6 @@ void mc_panic(char *s);
 void x86_mc_get_cpu_info(unsigned, uint32_t *, uint16_t *, uint16_t *,
                          uint32_t *, uint32_t *, uint32_t *, uint32_t *);
 
-#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \
-                             && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
-
 /* Register a handler for machine check exceptions. */
 typedef void (*x86_mce_vector_t)(struct cpu_user_regs *, long);
 extern void x86_mce_vector_register(x86_mce_vector_t);
@@ -166,10 +157,6 @@ void *x86_mcinfo_add(struct mc_info *mi,
 void *x86_mcinfo_reserve(struct mc_info *mi, int size);
 void x86_mcinfo_dump(struct mc_info *mi);
 
-int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
-                   uint64_t gstatus);
-int inject_vmce(struct domain *d, int vcpu);
-
 static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
 {
     switch (boot_cpu_data.x86_vendor) {
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Wed Sep 26 12:07:42 2012 +0200
@@ -18,6 +18,7 @@
 #include "x86_mca.h"
 #include "barrier.h"
 #include "util.h"
+#include "vmce.h"
 
 DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
 DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
@@ -980,7 +981,7 @@ enum mcheck_type intel_mcheck_init(struc
 }
 
 /* intel specific MCA MSR */
-int intel_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
     unsigned int bank = msr - MSR_IA32_MC0_CTL2;
 
@@ -993,7 +994,7 @@ int intel_mce_wrmsr(struct vcpu *v, uint
     return 1;
 }
 
-int intel_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
 {
     unsigned int bank = msr - MSR_IA32_MC0_CTL2;
 
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/non-fatal.c
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c Wed Sep 26 12:07:42 2012 +0200
@@ -21,6 +21,7 @@
 #include <asm/msr.h>
 
 #include "mce.h"
+#include "vmce.h"
 
 DEFINE_PER_CPU(struct mca_banks *, poll_bankmask);
 static struct timer mce_timer;
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/vmce.c
--- a/xen/arch/x86/cpu/mcheck/vmce.c Wed Sep 26 12:05:55 2012 +0200
+++ b/xen/arch/x86/cpu/mcheck/vmce.c Wed Sep 26 12:07:42 2012 +0200
@@ -33,8 +33,10 @@
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/p2m.h>
+
 #include "mce.h"
 #include "x86_mca.h"
+#include "vmce.h"
 
 /*
  * MCG_SER_P: software error recovery supported
@@ -143,7 +145,10 @@ static int bank_mce_rdmsr(const struct v
         switch ( boot_cpu_data.x86_vendor )
         {
         case X86_VENDOR_INTEL:
-            ret = intel_mce_rdmsr(v, msr, val);
+            ret = vmce_intel_rdmsr(v, msr, val);
+            break;
+        case X86_VENDOR_AMD:
+            ret = vmce_amd_rdmsr(v, msr, val);
             break;
         default:
             ret = 0;
@@ -200,7 +205,7 @@ int vmce_rdmsr(uint32_t msr, uint64_t *v
  * For historic version reason, bank number may greater than GUEST_MC_BANK_NUM,
  * when migratie from old vMCE version to new vMCE.
  */
-static int bank_mce_wrmsr(struct vcpu *v, u32 msr, u64 val)
+static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
     int ret = 1;
     unsigned int bank = (msr - MSR_IA32_MC0_CTL) / 4;
@@ -238,7 +243,10 @@ static int bank_mce_wrmsr(struct vcpu *v
         switch ( boot_cpu_data.x86_vendor )
         {
         case X86_VENDOR_INTEL:
-            ret = intel_mce_wrmsr(v, msr, val);
+            ret = vmce_intel_wrmsr(v, msr, val);
+            break;
+        case X86_VENDOR_AMD:
+            ret = vmce_amd_wrmsr(v, msr, val);
             break;
         default:
             ret = 0;
@@ -255,7 +263,7 @@ static int bank_mce_wrmsr(struct vcpu *v
 *   = 0: Not handled, should be handled by other components
 *   > 0: Success
 */
-int vmce_wrmsr(u32 msr, u64 val)
+int vmce_wrmsr(uint32_t msr, uint64_t val)
 {
     struct vcpu *cur = current;
     int ret = 1;
diff -r 07d0d5b3a005 -r 5d63c633a60b xen/arch/x86/cpu/mcheck/vmce.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/mcheck/vmce.h Wed Sep 26 12:07:42 2012 +0200
@@ -0,0 +1,23 @@
+#ifndef _MCHECK_VMCE_H
+#define _MCHECK_VMCE_H
+
+#include "x86_mca.h"
+
+int vmce_init(struct cpuinfo_x86 *c);
+
+#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \
+                             && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
+
+int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
+
+int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
+int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
+int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
+int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
+
+int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
+                   uint64_t gstatus);
+int inject_vmce(struct domain *d, int vcpu);
+
+#endif
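The vendor dispatch this changeset adds to bank_mce_rdmsr()/bank_mce_wrmsr() is small, so a standalone sketch of the pattern may help when reading the hunks above. The snippet below is illustrative only and is not code from the patch: the vendor constants, the stub handler bodies and main() are stand-ins so it compiles on its own, whereas Xen keys the switch off boot_cpu_data.x86_vendor and calls the vmce_intel_*/vmce_amd_* handlers declared in the new vmce.h.

/*
 * Illustrative only -- NOT part of the changeset.  The vendor constants,
 * stub handler bodies and main() are placeholders so this builds as a
 * standalone program.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define X86_VENDOR_INTEL 0   /* placeholder values, not Xen's definitions */
#define X86_VENDOR_AMD   1

struct vcpu;                 /* opaque here, as in the hypervisor */

/* Return convention from vmce.c: > 0 handled, 0 not handled. */
static int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    (void)v; (void)msr;
    *val = 0;                /* stub; the real handler works on MCi_CTL2 banks */
    return 1;
}

static int vmce_amd_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    (void)v; (void)msr;
    *val = 0;                /* stub; the patch's handler just accepts MC4_MISC1..3 */
    return 1;
}

/* Shape of the vendor switch the patch introduces in bank_mce_rdmsr(). */
static int bank_mce_rdmsr(const struct vcpu *v, int vendor,
                          uint32_t msr, uint64_t *val)
{
    switch ( vendor )
    {
    case X86_VENDOR_INTEL:
        return vmce_intel_rdmsr(v, msr, val);
    case X86_VENDOR_AMD:
        return vmce_amd_rdmsr(v, msr, val);
    default:
        return 0;            /* unknown vendor: not handled */
    }
}

int main(void)
{
    uint64_t val;
    int handled = bank_mce_rdmsr(NULL, X86_VENDOR_AMD, 0, &val);
    printf("handled=%d val=%" PRIu64 "\n", handled, val);
    return 0;
}

The return convention mirrors the comment kept in vmce.c: a positive return means the MSR access was handled, 0 means it should be handled by other components.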
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog