[Xen-changelog] [xen-unstable] [IA64] Optimize some functions
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1178222128 21600
# Node ID 6cf6f49f26abd61e011a6513512478d7d2a0af70
# Parent  2b653a785fb890265100059f49347c8e45a96314
[IA64] Optimize some functions

Optimize some functions by changing their parameter passing mode from
pointer to value.  This can reduce redundant memory access.

Signed-off-by: Xu, Anthony <Anthony.xu@xxxxxxxxx>
Signed-off-by: Zhang Xin <xing.z.zhang@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vmmu.c          |   56 +++++----
 xen/arch/ia64/vmx/vmx_interrupt.c |    4
 xen/arch/ia64/vmx/vmx_process.c   |    8 -
 xen/arch/ia64/vmx/vmx_vcpu.c      |    8 -
 xen/arch/ia64/vmx/vmx_virt.c      |   28 ++--
 xen/include/asm-ia64/vmx_vcpu.h   |  224 ++++++++++++++++----------------------
 6 files changed, 154 insertions(+), 174 deletions(-)

diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c  Thu May 03 13:55:28 2007 -0600
@@ -295,7 +295,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
     vpsr.val = VCPU(vcpu, vpsr);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
-    vmx_vcpu_get_pta(vcpu,&vpta.val);
+    vpta.val = vmx_vcpu_get_pta(vcpu);
 
     if ( vrr.ve & vpta.ve ) {
         switch ( ref ) {
@@ -629,38 +629,41 @@ again: /* Try again if VCPU has migrated
 }
 
-IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
+u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
 {
     PTA vpta;
     ia64_rr vrr;
+    u64 pval;
     u64 vhpt_offset;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
-        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
-        *pval = vpta.val & ~0xffff;
+        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
+                             vpta.val, 0, 0, 0, 0);
+        pval = vpta.val & ~0xffff;
     }else{
         vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
-        *pval = (vadr&VRN_MASK)|
+        pval = (vadr & VRN_MASK) |
             (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
             vhpt_offset;
     }
-    return IA64_NO_FAULT;
-}
-
-
-IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
+    return pval;
+}
+
+
+u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
 {
     ia64_rr vrr;
     PTA vpta;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    u64 pval;
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
-        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
+        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
     }else{
-        *pval = 1;
-    }
-    return IA64_NO_FAULT;
+        pval = 1;
+    }
+    return pval;
 }
 
@@ -725,7 +728,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
         }
     }
     else{
-        vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
+        vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
         data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
         if(data){
             if(vpsr.ic){
@@ -753,20 +756,21 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
     }
 }
 
-IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
+u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
 {
     thash_data_t *data;
     PTA vpta;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    u64 key;
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
-        *key=1;
-        return IA64_NO_FAULT;
+        key=1;
+        return key;
     }
     data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(!data||!data->p){
-        *key=1;
+        key = 1;
     }else{
-        *key=data->key;
-    }
-    return IA64_NO_FAULT;
-}
+        key = data->key;
+    }
+    return key;
+}
diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/arch/ia64/vmx/vmx_interrupt.c
--- a/xen/arch/ia64/vmx/vmx_interrupt.c Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c Thu May 03 13:55:28 2007 -0600
@@ -105,7 +105,7 @@ inject_guest_interruption(VCPU *vcpu, u6
     collect_interruption(vcpu);
     vmx_ia64_set_dcr(vcpu);
-    vmx_vcpu_get_iva(vcpu,&viva);
+    viva = vmx_vcpu_get_iva(vcpu);
     regs->cr_iip = viva + vec;
 }
@@ -135,7 +135,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
     }
 
     if ( set_iha) {
-        vmx_vcpu_thash(vcpu, vadr, &value);
+        value = vmx_vcpu_thash(vcpu, vadr);
         vcpu_set_iha(vcpu, value);
     }
 }
diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/arch/ia64/vmx/vmx_process.c
--- a/xen/arch/ia64/vmx/vmx_process.c   Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c   Thu May 03 13:55:28 2007 -0600
@@ -353,7 +353,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             }
         }
 
-        vmx_vcpu_get_pta(v, &vpta.val);
+        vpta.val = vmx_vcpu_get_pta(v);
         if (vpta.vf) {
             /* Long format is not yet supported.  */
             if (vpsr.ic) {
@@ -378,7 +378,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             }
         }
 
-        vmx_vcpu_thash(v, vadr, &vhpt_adr);
+        vhpt_adr = vmx_vcpu_thash(v, vadr);
         if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
             /* VHPT successfully read.  */
             if (!(pteval & _PAGE_P)) {
@@ -424,7 +424,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             return IA64_FAULT;
         }
 
-        vmx_vcpu_get_pta(v, &vpta.val);
+        vpta.val = vmx_vcpu_get_pta(v);
         if (vpta.vf) {
             /* Long format is not yet supported.  */
             vcpu_set_isr(v, misr.val);
@@ -433,7 +433,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
         }
 
-        vmx_vcpu_thash(v, vadr, &vhpt_adr);
+        vhpt_adr = vmx_vcpu_thash(v, vadr);
         if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
             /* VHPT successfully read.  */
             if (pteval & _PAGE_P) {
diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c      Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c      Thu May 03 13:55:28 2007 -0600
@@ -202,11 +202,9 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
  VCPU protection key register access routines
 **************************************************************************/
 
-IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg, u64 *pval)
-{
-    u64 val = (u64)ia64_get_pkr(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
+u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
+{
+    return ((u64)ia64_get_pkr(reg));
 }
 
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/arch/ia64/vmx/vmx_virt.c
--- a/xen/arch/ia64/vmx/vmx_virt.c      Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c      Thu May 03 13:55:28 2007 -0600
@@ -446,7 +446,7 @@ static IA64FAULT vmx_emul_thash(VCPU *vc
         return IA64_NO_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_thash(vcpu, r3, &r1);
+    r1 = vmx_vcpu_thash(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -478,7 +478,7 @@ static IA64FAULT vmx_emul_ttag(VCPU *vcp
         return IA64_NO_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_ttag(vcpu, r3, &r1);
+    r1 = vmx_vcpu_ttag(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -554,9 +554,7 @@ static IA64FAULT vmx_emul_tak(VCPU *vcpu
         return IA64_FAULT;
 #endif
     }
-    if(vmx_vcpu_tak(vcpu, r3, &r1)){
-        return IA64_FAULT;
-    }
+    r1 = vmx_vcpu_tak(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -833,7 +831,7 @@ static IA64FAULT vmx_emul_mov_from_ar_re
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
-    vmx_vcpu_get_itc(vcpu,&r1);
+    r1 = vmx_vcpu_get_itc(vcpu);
     vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
     return IA64_NO_FAULT;
 }
@@ -1057,7 +1055,7 @@ static IA64FAULT vmx_emul_mov_from_pkr(V
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_pkr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_pkr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
@@ -1094,7 +1092,7 @@ static IA64FAULT vmx_emul_mov_from_dbr(V
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_dbr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_dbr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
@@ -1131,7 +1129,7 @@ static IA64FAULT vmx_emul_mov_from_ibr(V
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_ibr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_ibr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
@@ -1168,7 +1166,7 @@ static IA64FAULT vmx_emul_mov_from_pmc(V
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_pmc(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_pmc(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
@@ -1196,7 +1194,7 @@ static IA64FAULT vmx_emul_mov_from_cpuid
         return IA64_FAULT;
     }
 #endif  //CHECK_FAULT
-    vmx_vcpu_get_cpuid(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_cpuid(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
@@ -1274,9 +1272,15 @@ static IA64FAULT vmx_emul_mov_to_cr(VCPU
     ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
         vcpu_set_gr(vcpu, tgt, val,0):fault;
 
+//#define cr_get(cr)  (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
+
+/*
 #define vmx_cr_get(cr) \
     ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
         vcpu_set_gr(vcpu, tgt, val,0):fault;
+*/
+
+#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
 
 static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
 {
@@ -1317,7 +1321,7 @@ static IA64FAULT vmx_emul_mov_from_cr(VC
         case 25:return cr_get(iha);
         case 64:return vmx_cr_get(lid);
         case 65:
-            vmx_vcpu_get_ivr(vcpu,&val);
+            val = vmx_vcpu_get_ivr(vcpu);
             return vcpu_set_gr(vcpu,tgt,val,0);
         case 66:return vmx_cr_get(tpr);
         case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
diff -r 2b653a785fb8 -r 6cf6f49f26ab xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h   Thu May 03 13:36:06 2007 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h   Thu May 03 13:55:28 2007 -0600
@@ -61,7 +61,7 @@ extern void vmx_vcpu_set_psr_sync_mpsr(V
 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
 extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
+extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
 IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
 extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
 extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
@@ -75,11 +75,11 @@ extern IA64FAULT vmx_vcpu_ptc_e(VCPU * v
 extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
 extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
 extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
-extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
-extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
+extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
 extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
 extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
@@ -132,100 +132,84 @@ extern void vmx_ia64_set_dcr(VCPU * v);
  VCPU control register access routines
 **************************************************************************/
 
-static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, itm);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, iva);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, pta);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, lid);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
-{
-    *pval = guest_read_vivr(vcpu);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, tpr);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
-{
-    *pval = 0L;  // reads of eoi always return 0
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, irr[0]);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, irr[1]);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, irr[2]);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, irr[3]);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, itv);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, pmv);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, cmcv);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, lrr0);
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
-{
-    *pval = VCPU(vcpu, lrr1);
-    return IA64_NO_FAULT;
+static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, itm));
+}
+
+static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, iva));
+}
+
+static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, pta));
+}
+
+static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, lid));
+}
+
+static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
+{
+    return ((u64)guest_read_vivr(vcpu));
+}
+
+static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, tpr));
+}
+
+static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
+{
+    return (0UL);  // reads of eoi always return 0
+}
+
+static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, irr[0]));
+}
+
+static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, irr[1]));
+}
+
+static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, irr[2]));
+}
+
+static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, irr[3]));
+}
+
+static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, itv));
+}
+
+static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, pmv));
+}
+
+static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, cmcv));
+}
+
+static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, lrr0));
+}
+
+static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
+{
+    return ((u64)VCPU(vcpu, lrr1));
 }
 
 static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
@@ -299,10 +283,9 @@ static inline IA64FAULT vmx_vcpu_set_itc
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
-{
-    *val = vtm_get_itc(vcpu);
-    return IA64_NO_FAULT;
+static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
+{
+    return ((u64)vtm_get_itc(vcpu));
 }
 
 /*
@@ -317,7 +300,7 @@ IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u6
  VCPU debug breakpoint register access routines
 **************************************************************************/
 
-static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented DBRs return a reserved register fault
     // TODO: Should set Logical CPU state, not just physical
@@ -325,8 +308,7 @@ static inline IA64FAULT vmx_vcpu_get_cpu
         panic_domain(vcpu_regs(vcpu), "there are only five cpuid registers");
     }
 
-    *pval = VCPU(vcpu, vcpuid[reg]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, vcpuid[reg]));
 }
 
 static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
@@ -345,20 +327,16 @@ static inline IA64FAULT vmx_vcpu_set_ibr
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented DBRs return a reserved register fault
-    u64 val = ia64_get_dbr(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
+    return ((u64)ia64_get_dbr(reg));
+}
+
+static inline u64 vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented IBRs return a reserved register fault
-    u64 val = ia64_get_ibr(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_ibr(reg));
 }
 
 /**************************************************************************
@@ -380,20 +358,16 @@ static inline IA64FAULT vmx_vcpu_set_pmd
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
 {
     // NOTE: Reads from unimplemented PMC registers return zero
-    u64 val = (u64) ia64_get_pmc(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
-}
-
-static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
+    return ((u64)ia64_get_pmc(reg));
+}
+
+static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
 {
     // NOTE: Reads from unimplemented PMD registers return zero
-    u64 val = (u64) ia64_get_pmd(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_pmd(reg));
 }
 
 /**************************************************************************
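The change above is a mechanical calling-convention cleanup: accessors that always returned IA64_NO_FAULT and handed their result back through a u64 * out-parameter now return the u64 directly, so the value can travel back in a register instead of being stored by the callee and reloaded by the caller, and the dead status check disappears at call sites (vmx_emul_tak, for instance, loses its IA64_FAULT branch). The following minimal sketch shows the before/after pattern in isolation; it is illustrative only, and widget_state, get_pta_by_pointer and get_pta_by_value are invented names that do not appear in the patch.

/* Illustrative sketch only -- not code from the patch. */
#include <stdint.h>
#include <stdio.h>

struct widget_state {
    uint64_t pta;                 /* stands in for a VCPU register field */
};

/* Old style: result written through an out-parameter, plus a status code
 * that is always "no fault".  The callee stores to memory and the caller
 * loads the value back. */
static int get_pta_by_pointer(const struct widget_state *s, uint64_t *pval)
{
    *pval = s->pta;
    return 0;
}

/* New style: the result is simply returned, so it can stay in a register
 * and there is no always-zero status to check. */
static uint64_t get_pta_by_value(const struct widget_state *s)
{
    return s->pta;
}

int main(void)
{
    struct widget_state s = { .pta = 0x1234 };
    uint64_t v1, v2;

    get_pta_by_pointer(&s, &v1);   /* old calling convention */
    v2 = get_pta_by_value(&s);     /* new calling convention */

    printf("0x%llx 0x%llx\n", (unsigned long long)v1, (unsigned long long)v2);
    return 0;
}

With the by-value form a caller reads naturally, as in the patch's r1 = vmx_vcpu_get_pkr(vcpu, r3); the sketch compiles standalone with any C99 compiler.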