[Xen-changelog] [xen-unstable] x86: cleanup rdmsr/wrmsr
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1250249195 -3600
# Node ID 46b874d603755b0b7c4579c07641dde3d2044117
# Parent  8d22ee47ec5dd215ba2379b5b29c4ef4b2aa27d8
x86: cleanup rdmsr/wrmsr

Use a 64bit value instead of extracting/merging two 32bit values.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/mce.c       |   81 ++++++++++++++++--------------------
 xen/arch/x86/cpu/mcheck/mce.h       |    4 -
 xen/arch/x86/cpu/mcheck/mce_intel.c |    4 -
 xen/arch/x86/hvm/hvm.c              |    6 --
 xen/arch/x86/hvm/svm/svm.c          |   27 ++++++------
 xen/arch/x86/hvm/viridian.c         |   37 +++++++---------
 xen/arch/x86/hvm/vmx/vmx.c          |   19 ++++----
 xen/arch/x86/traps.c                |   27 +++++------
 xen/include/asm-x86/hvm/viridian.h  |    6 --
 xen/include/asm-x86/processor.h     |    6 --
 xen/include/asm-x86/traps.h         |    4 -
 11 files changed, 104 insertions(+), 117 deletions(-)

diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c	Fri Aug 14 12:26:35 2009 +0100
@@ -670,34 +670,33 @@ void mce_init_msr(struct domain *d)
     spin_lock_init(&d->arch.vmca_msrs.lock);
 }
 
-int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int mce_rdmsr(uint32_t msr, uint64_t *val)
 {
     struct domain *d = current->domain;
     int ret = 1;
     unsigned int bank;
     struct bank_entry *entry = NULL;
 
-    *lo = *hi = 0x0;
+    *val = 0;
     spin_lock(&d->arch.vmca_msrs.lock);
 
     switch ( msr )
     {
     case MSR_IA32_MCG_STATUS:
-        *lo = (u32)d->arch.vmca_msrs.mcg_status;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_status >> 32);
-        if (*lo || *hi)
+        *val = d->arch.vmca_msrs.mcg_status;
+        if (*val)
             gdprintk(XENLOG_DEBUG,
-                "MCE: rdmsr MCG_STATUS lo %x hi %x\n", *lo, *hi);
+                "MCE: rdmsr MCG_STATUS 0x%"PRIx64"\n", *val);
         break;
     case MSR_IA32_MCG_CAP:
-        *lo = (u32)d->arch.vmca_msrs.mcg_cap;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_cap >> 32);
-        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP lo %x hi %x\n", *lo, *hi);
+        *val = d->arch.vmca_msrs.mcg_cap;
+        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP 0x%"PRIx64"\n",
+            *val);
         break;
     case MSR_IA32_MCG_CTL:
-        *lo = (u32)d->arch.vmca_msrs.mcg_ctl;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_ctl >> 32);
-        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL lo %x hi %x\n", *lo, *hi);
+        *val = d->arch.vmca_msrs.mcg_ctl;
+        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL 0x%"PRIx64"\n",
+            *val);
         break;
     case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * MAX_NR_BANKS - 1:
         bank = (msr - MSR_IA32_MC0_CTL) / 4;
@@ -710,10 +709,9 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
         switch (msr & (MSR_IA32_MC0_CTL | 3))
         {
         case MSR_IA32_MC0_CTL:
-            *lo = (u32)d->arch.vmca_msrs.mci_ctl[bank];
-            *hi = (u32)(d->arch.vmca_msrs.mci_ctl[bank] >> 32);
-            gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL lo %x hi %x\n",
-                bank, *lo, *hi);
+            *val = d->arch.vmca_msrs.mci_ctl[bank];
+            gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL 0x%"PRIx64"\n",
+                bank, *val);
             break;
         case MSR_IA32_MC0_STATUS:
             /* Only error bank is read. Non-error banks simply return. */
@@ -722,11 +720,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                 entry = list_entry(d->arch.vmca_msrs.impact_header.next,
                     struct bank_entry, list);
                 if (entry->bank == bank) {
-                    *lo = entry->mci_status;
-                    *hi = entry->mci_status >> 32;
+                    *val = entry->mci_status;
                     gdprintk(XENLOG_DEBUG,
                         "MCE: rd MC%u_STATUS in vMCE# context "
-                        "lo %x hi %x\n", bank, *lo, *hi);
+                        "value 0x%"PRIx64"\n", bank, *val);
                 }
                 else
                     entry = NULL;
@@ -739,11 +736,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                     struct bank_entry, list);
                 if ( entry->bank == bank )
                 {
-                    *lo = entry->mci_addr;
-                    *hi = entry->mci_addr >> 32;
+                    *val = entry->mci_addr;
                     gdprintk(XENLOG_DEBUG,
-                        "MCE: rd MC%u_ADDR in vMCE# context lo %x hi %x\n",
-                        bank, *lo, *hi);
+                        "MCE: rdmsr MC%u_ADDR in vMCE# context "
+                        "0x%"PRIx64"\n", bank, *val);
                 }
             }
             break;
@@ -754,11 +750,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                     struct bank_entry, list);
                 if ( entry->bank == bank )
                 {
-                    *lo = entry->mci_misc;
-                    *hi = entry->mci_misc >> 32;
+                    *val = entry->mci_misc;
                     gdprintk(XENLOG_DEBUG,
-                        "MCE: rd MC%u_MISC in vMCE# context lo %x hi %x\n",
-                        bank, *lo, *hi);
+                        "MCE: rd MC%u_MISC in vMCE# context "
+                        "0x%"PRIx64"\n", bank, *val);
                 }
             }
             break;
@@ -768,7 +763,7 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
         switch ( boot_cpu_data.x86_vendor )
         {
         case X86_VENDOR_INTEL:
-            ret = intel_mce_rdmsr(msr, lo, hi);
+            ret = intel_mce_rdmsr(msr, val);
             break;
         default:
            ret = 0;
@@ -781,7 +776,7 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
     return ret;
 }
 
-int mce_wrmsr(u32 msr, u64 value)
+int mce_wrmsr(u32 msr, u64 val)
 {
     struct domain *d = current->domain;
     struct bank_entry *entry = NULL;
@@ -796,18 +791,18 @@ int mce_wrmsr(u32 msr, u64 value)
     switch ( msr )
     {
     case MSR_IA32_MCG_CTL:
-        if ( value && (value + 1) )
+        if ( val && (val + 1) )
         {
-            gdprintk(XENLOG_WARNING, "MCE: value \"%"PRIx64"\" written "
-                     "to MCG_CTL should be all 0s or 1s\n", value);
+            gdprintk(XENLOG_WARNING, "MCE: val \"%"PRIx64"\" written "
+                     "to MCG_CTL should be all 0s or 1s\n", val);
             ret = -1;
             break;
         }
-        d->arch.vmca_msrs.mcg_ctl = value;
+        d->arch.vmca_msrs.mcg_ctl = val;
         break;
     case MSR_IA32_MCG_STATUS:
-        d->arch.vmca_msrs.mcg_status = value;
-        gdprintk(XENLOG_DEBUG, "MCE: wrmsr MCG_STATUS %"PRIx64"\n", value);
+        d->arch.vmca_msrs.mcg_status = val;
+        gdprintk(XENLOG_DEBUG, "MCE: wrmsr MCG_STATUS %"PRIx64"\n", val);
         /* For HVM guest, this is the point for deleting vMCE injection node */
         if ( d->is_hvm && (d->arch.vmca_msrs.nr_injection > 0) )
         {
@@ -845,15 +840,15 @@ int mce_wrmsr(u32 msr, u64 value)
         switch ( msr & (MSR_IA32_MC0_CTL | 3) )
         {
         case MSR_IA32_MC0_CTL:
-            if ( value && (value + 1) )
+            if ( val && (val + 1) )
             {
-                gdprintk(XENLOG_WARNING, "MCE: value written to MC%u_CTL "
+                gdprintk(XENLOG_WARNING, "MCE: val written to MC%u_CTL "
                          "should be all 0s or 1s (is %"PRIx64")\n",
-                         bank, value);
+                         bank, val);
                 ret = -1;
                 break;
             }
-            d->arch.vmca_msrs.mci_ctl[bank] = value;
+            d->arch.vmca_msrs.mci_ctl[bank] = val;
             break;
         case MSR_IA32_MC0_STATUS:
             /* Give the first entry of the list, it corresponds to current
@@ -866,14 +861,14 @@ int mce_wrmsr(u32 msr, u64 value)
                 entry = list_entry(d->arch.vmca_msrs.impact_header.next,
                                    struct bank_entry, list);
                 if ( entry->bank == bank )
-                    entry->mci_status = value;
+                    entry->mci_status = val;
                 gdprintk(XENLOG_DEBUG,
                          "MCE: wr MC%u_STATUS %"PRIx64" in vMCE#\n",
-                         bank, value);
+                         bank, val);
             }
             else
                 gdprintk(XENLOG_DEBUG,
-                    "MCE: wr MC%u_STATUS %"PRIx64"\n", bank, value);
+                    "MCE: wr MC%u_STATUS %"PRIx64"\n", bank, val);
             break;
         case MSR_IA32_MC0_ADDR:
             gdprintk(XENLOG_WARNING, "MCE: MC%u_ADDR is read-only\n", bank);
@@ -889,7 +884,7 @@ int mce_wrmsr(u32 msr, u64 value)
         switch ( boot_cpu_data.x86_vendor )
         {
         case X86_VENDOR_INTEL:
-            ret = intel_mce_wrmsr(msr, value);
+            ret = intel_mce_wrmsr(msr, val);
             break;
         default:
             ret = 0;
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.h	Fri Aug 14 12:26:35 2009 +0100
@@ -25,8 +25,8 @@ void amd_nonfatal_mcheck_init(struct cpu
 
 u64 mce_cap_init(void);
 
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
-int intel_mce_wrmsr(u32 msr, u64 value);
+int intel_mce_rdmsr(uint32_t msr, uint64_t *val);
+int intel_mce_wrmsr(uint32_t msr, uint64_t val);
 
 int mce_available(struct cpuinfo_x86 *c);
 int mce_firstbank(struct cpuinfo_x86 *c);
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c	Fri Aug 14 12:26:35 2009 +0100
@@ -1080,7 +1080,7 @@ int intel_mcheck_init(struct cpuinfo_x86
     return 1;
 }
 
-int intel_mce_wrmsr(u32 msr, u64 value)
+int intel_mce_wrmsr(uint32_t msr, uint64_t val)
 {
     int ret = 1;
 
@@ -1098,7 +1098,7 @@ int intel_mce_wrmsr(u32 msr, u64 value)
     return ret;
 }
 
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int intel_mce_rdmsr(uint32_t msr, uint64_t *val)
 {
     int ret = 1;
 
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Fri Aug 14 12:26:35 2009 +0100
@@ -1782,7 +1782,6 @@ int hvm_msr_read_intercept(struct cpu_us
     uint64_t *var_range_base, *fixed_range_base;
     int index, mtrr;
     uint32_t cpuid[4];
-    uint32_t lo, hi;
     int ret;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
@@ -1852,14 +1851,11 @@ int hvm_msr_read_intercept(struct cpu_us
         break;
 
     default:
-        ret = mce_rdmsr(ecx, &lo, &hi);
+        ret = mce_rdmsr(ecx, &msr_content);
         if ( ret < 0 )
             goto gp_fault;
         else if ( ret )
-        {
-            msr_content = ((u64)hi << 32) | lo;
             break;
-        }
         /* ret == 0, This is not an MCE MSR, see other MSRs */
         else if (!ret)
             return hvm_funcs.msr_read_intercept(regs);
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Aug 14 12:26:35 2009 +0100
@@ -1085,20 +1085,23 @@ static int svm_msr_read_intercept(struct
         break;
 
     default:
-        if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
-             rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
-             rdmsr_safe(ecx, eax, edx) == 0 )
+
+        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+             rdmsr_hypervisor_regs(ecx, &msr_content) )
+            break;
+
+        if ( rdmsr_safe(ecx, eax, edx) == 0 )
         {
-            regs->eax = eax;
-            regs->edx = edx;
-            goto done;
+            msr_content = ((uint64_t)edx << 32) | eax;
+            break;
         }
+        goto gpf;
     }
 
-    regs->eax = msr_content & 0xFFFFFFFF;
-    regs->edx = msr_content >> 32;
-
- done:
+
+    regs->eax = (uint32_t)msr_content;
+    regs->edx = (uint32_t)(msr_content >> 32);
+
     HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
@@ -1164,13 +1167,13 @@ static int svm_msr_write_intercept(struc
         break;
 
     default:
-        if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+        if ( wrmsr_viridian_regs(ecx, msr_content) )
             break;
 
         switch ( long_mode_do_msr_write(regs) )
         {
         case HNDL_unhandled:
-            wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+            wrmsr_hypervisor_regs(ecx, msr_content);
             break;
         case HNDL_exception_raised:
             return X86EMUL_EXCEPTION;
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/viridian.c	Fri Aug 14 12:26:35 2009 +0100
@@ -129,10 +129,9 @@ static void enable_hypercall_page(void)
     put_page_and_type(mfn_to_page(mfn));
 }
 
-int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
 {
     struct domain *d = current->domain;
-    uint64_t val = ((uint64_t)edx << 32) | eax;
 
     if ( !is_viridian_domain(d) )
         return 0;
@@ -178,6 +177,7 @@ int wrmsr_viridian_regs(uint32_t idx, ui
         break;
 
     case VIRIDIAN_MSR_ICR: {
+        u32 eax = (u32)val, edx = (u32)(val >> 32);
         struct vlapic *vlapic = vcpu_vlapic(current);
         perfc_incr(mshv_wrmsr_icr);
         eax &= ~(1 << 12);
@@ -190,7 +190,7 @@ int wrmsr_viridian_regs(uint32_t idx, ui
 
     case VIRIDIAN_MSR_TPR:
         perfc_incr(mshv_wrmsr_tpr);
-        vlapic_set_reg(vcpu_vlapic(current), APIC_TASKPRI, eax & 0xff);
+        vlapic_set_reg(vcpu_vlapic(current), APIC_TASKPRI, (uint8_t)val);
         break;
 
     case VIRIDIAN_MSR_APIC_ASSIST:
@@ -224,9 +224,8 @@ int wrmsr_viridian_regs(uint32_t idx, ui
     return 1;
 }
 
-int rdmsr_viridian_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
-{
-    uint64_t val;
+int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
+{
     struct vcpu *v = current;
 
     if ( !is_viridian_domain(v->domain) )
@@ -236,36 +235,34 @@ int rdmsr_viridian_regs(uint32_t idx, ui
     {
     case VIRIDIAN_MSR_GUEST_OS_ID:
         perfc_incr(mshv_rdmsr_osid);
-        val = v->domain->arch.hvm_domain.viridian.guest_os_id.raw;
+        *val = v->domain->arch.hvm_domain.viridian.guest_os_id.raw;
         break;
 
     case VIRIDIAN_MSR_HYPERCALL:
         perfc_incr(mshv_rdmsr_hc_page);
-        val = v->domain->arch.hvm_domain.viridian.hypercall_gpa.raw;
+        *val = v->domain->arch.hvm_domain.viridian.hypercall_gpa.raw;
         break;
 
     case VIRIDIAN_MSR_VP_INDEX:
         perfc_incr(mshv_rdmsr_vp_index);
-        val = v->vcpu_id;
+        *val = v->vcpu_id;
         break;
 
     case VIRIDIAN_MSR_ICR:
         perfc_incr(mshv_rdmsr_icr);
-        val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
-               vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
+        *val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
+                vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
         break;
 
     case VIRIDIAN_MSR_TPR:
         perfc_incr(mshv_rdmsr_tpr);
-        val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
-        break;
-
-    default:
-        return 0;
-    }
-
-    *eax = val;
-    *edx = val >> 32;
+        *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
+        break;
+
+    default:
+        return 0;
+    }
+
     return 1;
 }
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Aug 14 12:26:35 2009 +0100
@@ -1849,13 +1849,14 @@ static int vmx_msr_read_intercept(struct
         break;
     }
 
-    if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
-         rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
-         rdmsr_safe(ecx, eax, edx) == 0 )
-    {
-        regs->eax = eax;
-        regs->edx = edx;
-        goto done;
+    if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+         rdmsr_hypervisor_regs(ecx, &msr_content) )
+        break;
+
+    if ( rdmsr_safe(ecx, eax, edx) == 0 )
+    {
+        msr_content = ((uint64_t)edx << 32) | eax;
+        break;
     }
 
     goto gp_fault;
@@ -2029,7 +2030,7 @@ static int vmx_msr_write_intercept(struc
         if ( passive_domain_do_wrmsr(regs) )
             return X86EMUL_OKAY;
 
-        if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+        if ( wrmsr_viridian_regs(ecx, msr_content) )
             break;
 
         switch ( long_mode_do_msr_write(regs) )
@@ -2037,7 +2038,7 @@ static int vmx_msr_write_intercept(struc
         case HNDL_unhandled:
             if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                  !is_last_branch_msr(ecx) )
-                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+                wrmsr_hypervisor_regs(ecx, msr_content);
             break;
         case HNDL_exception_raised:
             return X86EMUL_EXCEPTION;
diff -r 8d22ee47ec5d -r 46b874d60375 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/traps.c	Fri Aug 14 12:26:35 2009 +0100
@@ -603,8 +603,7 @@ DO_ERROR( TRAP_alignment_check, al
 DO_ERROR( TRAP_alignment_check, alignment_check)
 DO_ERROR_NOCODE(TRAP_simd_error, simd_coprocessor_error)
 
-int rdmsr_hypervisor_regs(
-    uint32_t idx, uint32_t *eax, uint32_t *edx)
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val)
 {
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural MSRs. */
@@ -618,7 +617,7 @@ int rdmsr_hypervisor_regs(
     {
     case 0:
     {
-        *eax = *edx = 0;
+        *val = 0;
         break;
     }
     default:
@@ -628,8 +627,7 @@ int rdmsr_hypervisor_regs(
     return 1;
 }
 
-int wrmsr_hypervisor_regs(
-    uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
 {
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural MSRs. */
@@ -643,10 +641,10 @@ int wrmsr_hypervisor_regs(
     {
     case 0:
     {
-        void *hypercall_page;
+        void *hypercall_page;
         unsigned long mfn;
-        unsigned long gmfn = ((unsigned long)edx << 20) | (eax >> 12);
-        unsigned int idx = eax & 0xfff;
+        unsigned long gmfn = val >> 12;
+        unsigned int idx = val & 0xfff;
 
         if ( idx > 0 )
         {
@@ -1696,7 +1694,8 @@ static int emulate_privileged_op(struct
     unsigned long code_base, code_limit;
     char io_emul_stub[32];
     void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
-    u32 l, h;
+    uint32_t l, h;
+    uint64_t val;
 
     if ( !read_descriptor(regs->cs, v, regs,
                           &code_base, &code_limit, &ar,
@@ -2246,7 +2245,7 @@ static int emulate_privileged_op(struct
                 goto fail;
             break;
         default:
-            if ( wrmsr_hypervisor_regs(regs->ecx, eax, edx) )
+            if ( wrmsr_hypervisor_regs(regs->ecx, val) )
                 break;
            rc = mce_wrmsr(regs->ecx, val);
@@ -2328,15 +2327,15 @@ static int emulate_privileged_op(struct
         case MSR_EFER:
         case MSR_AMD_PATCHLEVEL:
         default:
-            if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )
+            if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
             {
 rdmsr_writeback:
-                regs->eax = l;
-                regs->edx = h;
+                regs->eax = (uint32_t)val;
+                regs->edx = (uint32_t)(val >> 32);
                 break;
             }
 
-            rc = mce_rdmsr(regs->ecx, &l, &h);
+            rc = mce_rdmsr(regs->ecx, &val);
             if ( rc < 0 )
                 goto fail;
             if ( rc )
diff -r 8d22ee47ec5d -r 46b874d60375 xen/include/asm-x86/hvm/viridian.h
--- a/xen/include/asm-x86/hvm/viridian.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/hvm/viridian.h	Fri Aug 14 12:26:35 2009 +0100
@@ -50,14 +50,12 @@ int
 
 int
 wrmsr_viridian_regs(
     uint32_t idx,
-    uint32_t eax,
-    uint32_t edx);
+    uint64_t val);
 
 int
 rdmsr_viridian_regs(
     uint32_t idx,
-    uint32_t *eax,
-    uint32_t *edx);
+    uint64_t *val);
 
 int viridian_hypercall(struct cpu_user_regs *regs);
diff -r 8d22ee47ec5d -r 46b874d60375 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/processor.h	Fri Aug 14 12:26:35 2009 +0100
@@ -551,10 +551,8 @@ void cpu_mcheck_disable(void);
 
 int cpuid_hypervisor_leaves(
     uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
-int rdmsr_hypervisor_regs(
-    uint32_t idx, uint32_t *eax, uint32_t *edx);
-int wrmsr_hypervisor_regs(
-    uint32_t idx, uint32_t eax, uint32_t edx);
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val);
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);
 
 int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
diff -r 8d22ee47ec5d -r 46b874d60375 xen/include/asm-x86/traps.h
--- a/xen/include/asm-x86/traps.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/traps.h	Fri Aug 14 12:26:35 2009 +0100
@@ -49,7 +49,7 @@ extern int send_guest_trap(struct domain
 
 /* Guest vMCE MSRs virtualization */
 extern void mce_init_msr(struct domain *d);
-extern int mce_wrmsr(u32 msr, u64 value);
-extern int mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
+extern int mce_wrmsr(uint32_t msr, uint64_t val);
+extern int mce_rdmsr(uint32_t msr, uint64_t *val);
 
 #endif /* ASM_TRAP_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog