[Xen-changelog] [xen master] vVMX: use latched VMCS machine address
commit d02e84b9d9d16b6b56186f0dfdcb3c90b83c82a3 Author: Jan Beulich <jbeulich@xxxxxxxx> AuthorDate: Tue Nov 24 12:07:27 2015 +0100 Commit: Jan Beulich <jbeulich@xxxxxxxx> CommitDate: Tue Nov 24 12:07:27 2015 +0100 vVMX: use latched VMCS machine address Instead of calling domain_page_map_to_mfn() over and over, latch the guest VMCS machine address unconditionally (i.e. independent of whether VMCS shadowing is supported by the hardware). Since this requires altering the parameters of __[gs]et_vmcs{,_real}() (and hence all their callers) anyway, take the opportunity to also drop the bogus double underscores from their names (and from __[gs]et_vmcs_virtual() as well). Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx> Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx> --- xen/arch/x86/hvm/vmx/intr.c | 4 +- xen/arch/x86/hvm/vmx/vmcs.c | 21 ++-- xen/arch/x86/hvm/vmx/vmx.c | 6 +- xen/arch/x86/hvm/vmx/vvmx.c | 308 ++++++++++++++++-------------------- xen/include/asm-x86/hvm/vmx/vmcs.h | 10 +- xen/include/asm-x86/hvm/vmx/vvmx.h | 26 ++-- 6 files changed, 169 insertions(+), 206 deletions(-) diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c index 56c40b1..8fca08c 100644 --- a/xen/arch/x86/hvm/vmx/intr.c +++ b/xen/arch/x86/hvm/vmx/intr.c @@ -191,13 +191,13 @@ static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack) if ( intack.source == hvm_intsrc_pic || intack.source == hvm_intsrc_lapic ) { - ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, PIN_BASED_VM_EXEC_CONTROL); + ctrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) ) return 0; vmx_inject_extint(intack.vector, intack.source); - ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS); + ctrl = get_vvmcs(v, VM_EXIT_CONTROLS); if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT ) { /* for now, duplicate the ack path in vmx_intr_assist */ diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index 7cdb063..d6dab8e 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -932,37 +932,36 @@ void vmx_vmcs_switch(paddr_t from, paddr_t to) spin_unlock(&vmx->vmcs_lock); } -void virtual_vmcs_enter(void *vvmcs) +void virtual_vmcs_enter(const struct vcpu *v) { - __vmptrld(pfn_to_paddr(domain_page_map_to_mfn(vvmcs))); + __vmptrld(v->arch.hvm_vmx.vmcs_shadow_maddr); } -void virtual_vmcs_exit(void *vvmcs) +void virtual_vmcs_exit(const struct vcpu *v) { paddr_t cur = this_cpu(current_vmcs); - __vmpclear(pfn_to_paddr(domain_page_map_to_mfn(vvmcs))); + __vmpclear(v->arch.hvm_vmx.vmcs_shadow_maddr); if ( cur ) __vmptrld(cur); - } -u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding) +u64 virtual_vmcs_vmread(const struct vcpu *v, u32 vmcs_encoding) { u64 res; - virtual_vmcs_enter(vvmcs); + virtual_vmcs_enter(v); __vmread(vmcs_encoding, &res); - virtual_vmcs_exit(vvmcs); + virtual_vmcs_exit(v); return res; } -void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val) +void virtual_vmcs_vmwrite(const struct vcpu *v, u32 vmcs_encoding, u64 val) { - virtual_vmcs_enter(vvmcs); + virtual_vmcs_enter(v); __vmwrite(vmcs_encoding, val); - virtual_vmcs_exit(vvmcs); + virtual_vmcs_exit(v); } static int construct_vmcs(struct vcpu *v) diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index eb6248e..4b8d2fa 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -1458,8 +1458,7 @@ void vmx_inject_extint(int trap, uint8_t source) u32 pin_based_cntrl; if ( nestedhvm_vcpu_in_guestmode(v) ) { - pin_based_cntrl = 
__get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, - PIN_BASED_VM_EXEC_CONTROL); + pin_based_cntrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); if ( pin_based_cntrl & PIN_BASED_EXT_INTR_MASK ) { nvmx_enqueue_n2_exceptions (v, INTR_INFO_VALID_MASK | @@ -1479,8 +1478,7 @@ void vmx_inject_nmi(void) u32 pin_based_cntrl; if ( nestedhvm_vcpu_in_guestmode(v) ) { - pin_based_cntrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, - PIN_BASED_VM_EXEC_CONTROL); + pin_based_cntrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); if ( pin_based_cntrl & PIN_BASED_NMI_EXITING ) { nvmx_enqueue_n2_exceptions (v, INTR_INFO_VALID_MASK | diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index ea1052e..e48d872 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -175,11 +175,7 @@ int nvmx_vcpu_reset(struct vcpu *v) uint64_t nvmx_vcpu_eptp_base(struct vcpu *v) { - uint64_t eptp_base; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - - eptp_base = __get_vvmcs(nvcpu->nv_vvmcx, EPT_POINTER); - return eptp_base & PAGE_MASK; + return get_vvmcs(v, EPT_POINTER) & PAGE_MASK; } bool_t nvmx_ept_enabled(struct vcpu *v) @@ -236,7 +232,7 @@ static int vvmcs_offset(u32 width, u32 type, u32 index) return offset; } -u64 __get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding) +u64 get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding) { union vmcs_encoding enc; u64 *content = (u64 *) vvmcs; @@ -266,12 +262,12 @@ u64 __get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding) return res; } -u64 __get_vvmcs_real(void *vvmcs, u32 vmcs_encoding) +u64 get_vvmcs_real(const struct vcpu *v, u32 encoding) { - return virtual_vmcs_vmread(vvmcs, vmcs_encoding); + return virtual_vmcs_vmread(v, encoding); } -void __set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val) +void set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val) { union vmcs_encoding enc; u64 *content = (u64 *) vvmcs; @@ -307,9 +303,9 @@ void __set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val) content[offset] = res; } -void __set_vvmcs_real(void *vvmcs, u32 vmcs_encoding, u64 val) +void set_vvmcs_real(const struct vcpu *v, u32 encoding, u64 val) { - virtual_vmcs_vmwrite(vvmcs, vmcs_encoding, val); + virtual_vmcs_vmwrite(v, encoding, val); } static unsigned long reg_read(struct cpu_user_regs *regs, @@ -331,25 +327,20 @@ static void reg_write(struct cpu_user_regs *regs, static inline u32 __n2_pin_exec_control(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - - return __get_vvmcs(nvcpu->nv_vvmcx, PIN_BASED_VM_EXEC_CONTROL); + return get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); } static inline u32 __n2_exec_control(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - - return __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL); + return get_vvmcs(v, CPU_BASED_VM_EXEC_CONTROL); } static inline u32 __n2_secondary_exec_control(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u64 second_ctrl = 0; if ( __n2_exec_control(v) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS ) - second_ctrl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL); + second_ctrl = get_vvmcs(v, SECONDARY_VM_EXEC_CONTROL); return second_ctrl; } @@ -502,18 +493,17 @@ static void vmreturn(struct cpu_user_regs *regs, enum vmx_ops_result ops_res) bool_t nvmx_intercepts_exception(struct vcpu *v, unsigned int trap, int error_code) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u32 exception_bitmap, pfec_match=0, pfec_mask=0; int r; ASSERT ( trap < 32 ); - exception_bitmap = __get_vvmcs(nvcpu->nv_vvmcx, EXCEPTION_BITMAP); + exception_bitmap = 
get_vvmcs(v, EXCEPTION_BITMAP); r = exception_bitmap & (1 << trap) ? 1: 0; if ( trap == TRAP_page_fault ) { - pfec_match = __get_vvmcs(nvcpu->nv_vvmcx, PAGE_FAULT_ERROR_CODE_MATCH); - pfec_mask = __get_vvmcs(nvcpu->nv_vvmcx, PAGE_FAULT_ERROR_CODE_MASK); + pfec_match = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH); + pfec_mask = get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK); if ( (error_code & pfec_mask) != pfec_match ) r = !r; } @@ -528,9 +518,7 @@ static inline u32 __shadow_control(struct vcpu *v, unsigned int field, u32 host_value) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - - return (u32) __get_vvmcs(nvcpu->nv_vvmcx, field) | host_value; + return get_vvmcs(v, field) | host_value; } static void set_shadow_control(struct vcpu *v, @@ -597,13 +585,12 @@ void nvmx_update_secondary_exec_control(struct vcpu *v, unsigned long host_cntrl) { u32 shadow_cntrl; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct nestedvmx *nvmx = &vcpu_2_nvmx(v); u32 apicv_bit = SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; host_cntrl &= ~apicv_bit; - shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL); + shadow_cntrl = get_vvmcs(v, SECONDARY_VM_EXEC_CONTROL); /* No vAPIC-v support, so it shouldn't be set in vmcs12. */ ASSERT(!(shadow_cntrl & apicv_bit)); @@ -616,10 +603,9 @@ void nvmx_update_secondary_exec_control(struct vcpu *v, static void nvmx_update_pin_control(struct vcpu *v, unsigned long host_cntrl) { u32 shadow_cntrl; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); host_cntrl &= ~PIN_BASED_POSTED_INTERRUPT; - shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, PIN_BASED_VM_EXEC_CONTROL); + shadow_cntrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL); /* No vAPIC-v support, so it shouldn't be set in vmcs12. */ ASSERT(!(shadow_cntrl & PIN_BASED_POSTED_INTERRUPT)); @@ -631,9 +617,8 @@ static void nvmx_update_pin_control(struct vcpu *v, unsigned long host_cntrl) static void nvmx_update_exit_control(struct vcpu *v, unsigned long host_cntrl) { u32 shadow_cntrl; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_CONTROLS); + shadow_cntrl = get_vvmcs(v, VM_EXIT_CONTROLS); shadow_cntrl &= ~(VM_EXIT_SAVE_DEBUG_CNTRLS | VM_EXIT_LOAD_HOST_PAT | VM_EXIT_LOAD_HOST_EFER @@ -645,9 +630,8 @@ static void nvmx_update_exit_control(struct vcpu *v, unsigned long host_cntrl) static void nvmx_update_entry_control(struct vcpu *v) { u32 shadow_cntrl; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, VM_ENTRY_CONTROLS); + shadow_cntrl = get_vvmcs(v, VM_ENTRY_CONTROLS); shadow_cntrl &= ~(VM_ENTRY_LOAD_GUEST_PAT | VM_ENTRY_LOAD_GUEST_EFER | VM_ENTRY_LOAD_PERF_GLOBAL_CTRL); @@ -661,7 +645,6 @@ void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value) static void nvmx_update_apic_access_address(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u32 ctrl; ctrl = __n2_secondary_exec_control(v); @@ -671,7 +654,7 @@ static void nvmx_update_apic_access_address(struct vcpu *v) unsigned long apic_gpfn; struct page_info *apic_pg; - apic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, APIC_ACCESS_ADDR) >> PAGE_SHIFT; + apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT; apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC); ASSERT(apic_pg && !p2m_is_paging(p2mt)); __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg)); @@ -683,7 +666,6 @@ static void nvmx_update_apic_access_address(struct vcpu *v) static void nvmx_update_virtual_apic_address(struct vcpu *v) { - struct nestedvcpu 
*nvcpu = &vcpu_nestedhvm(v); u32 ctrl; ctrl = __n2_exec_control(v); @@ -693,7 +675,7 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v) unsigned long vapic_gpfn; struct page_info *vapic_pg; - vapic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT; + vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT; vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC); ASSERT(vapic_pg && !p2m_is_paging(p2mt)); __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg)); @@ -705,23 +687,20 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v) static void nvmx_update_tpr_threshold(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u32 ctrl = __n2_exec_control(v); + if ( ctrl & CPU_BASED_TPR_SHADOW ) - __vmwrite(TPR_THRESHOLD, __get_vvmcs(nvcpu->nv_vvmcx, TPR_THRESHOLD)); + __vmwrite(TPR_THRESHOLD, get_vvmcs(v, TPR_THRESHOLD)); else __vmwrite(TPR_THRESHOLD, 0); } static void nvmx_update_pfec(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - void *vvmcs = nvcpu->nv_vvmcx; - __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, - __get_vvmcs(vvmcs, PAGE_FAULT_ERROR_CODE_MASK)); + get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MASK)); __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, - __get_vvmcs(vvmcs, PAGE_FAULT_ERROR_CODE_MATCH)); + get_vvmcs(v, PAGE_FAULT_ERROR_CODE_MATCH)); } static void __clear_current_vvmcs(struct vcpu *v) @@ -739,7 +718,7 @@ static bool_t __must_check _map_msr_bitmap(struct vcpu *v) if ( nvmx->msrbitmap ) hvm_unmap_guest_frame(nvmx->msrbitmap, 1); - gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP); + gpa = get_vvmcs(v, MSR_BITMAP); nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1); return nvmx->msrbitmap != NULL; @@ -754,7 +733,7 @@ static bool_t __must_check _map_io_bitmap(struct vcpu *v, u64 vmcs_reg) index = vmcs_reg == IO_BITMAP_A ? 
0 : 1; if (nvmx->iobitmap[index]) hvm_unmap_guest_frame(nvmx->iobitmap[index], 1); - gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg); + gpa = get_vvmcs(v, vmcs_reg); nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1); return nvmx->iobitmap[index] != NULL; @@ -777,6 +756,7 @@ static void nvmx_purge_vvmcs(struct vcpu *v) hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1); nvcpu->nv_vvmcx = NULL; nvcpu->nv_vvmcxaddr = VMCX_EADDR; + v->arch.hvm_vmx.vmcs_shadow_maddr = 0; for (i=0; i<2; i++) { if ( nvmx->iobitmap[i] ) { hvm_unmap_guest_frame(nvmx->iobitmap[i], 1); @@ -792,11 +772,10 @@ static void nvmx_purge_vvmcs(struct vcpu *v) u64 nvmx_get_tsc_offset(struct vcpu *v) { u64 offset = 0; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - if ( __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL) & + if ( get_vvmcs(v, CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_USE_TSC_OFFSETING ) - offset = __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET); + offset = get_vvmcs(v, TSC_OFFSET); return offset; } @@ -911,12 +890,9 @@ static struct vmcs_host_to_guest { {HOST_SYSENTER_EIP, GUEST_SYSENTER_EIP}, }; -static void vvmcs_to_shadow(void *vvmcs, unsigned int field) +static void vvmcs_to_shadow(const struct vcpu *v, unsigned int field) { - u64 value; - - value = __get_vvmcs(vvmcs, field); - __vmwrite(field, value); + __vmwrite(field, get_vvmcs(v, field)); } static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n, @@ -950,15 +926,15 @@ static void vvmcs_to_shadow_bulk(struct vcpu *v, unsigned int n, fallback: for ( i = 0; i < n; i++ ) - vvmcs_to_shadow(vvmcs, field[i]); + vvmcs_to_shadow(v, field[i]); } -static inline void shadow_to_vvmcs(void *vvmcs, unsigned int field) +static inline void shadow_to_vvmcs(const struct vcpu *v, unsigned int field) { unsigned long value; if ( __vmread_safe(field, &value) ) - __set_vvmcs(vvmcs, field, value); + set_vvmcs(v, field, value); } static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n, @@ -992,7 +968,7 @@ static void shadow_to_vvmcs_bulk(struct vcpu *v, unsigned int n, fallback: for ( i = 0; i < n; i++ ) - shadow_to_vvmcs(vvmcs, field[i]); + shadow_to_vvmcs(v, field[i]); } static void load_shadow_control(struct vcpu *v) @@ -1017,7 +993,6 @@ static void load_shadow_control(struct vcpu *v) static void load_shadow_guest_state(struct vcpu *v) { struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - void *vvmcs = nvcpu->nv_vvmcx; u32 control; u64 cr_gh_mask, cr_read_shadow; @@ -1031,18 +1006,18 @@ static void load_shadow_guest_state(struct vcpu *v) vvmcs_to_shadow_bulk(v, ARRAY_SIZE(vmcs_gstate_field), vmcs_gstate_field); - nvcpu->guest_cr[0] = __get_vvmcs(vvmcs, CR0_READ_SHADOW); - nvcpu->guest_cr[4] = __get_vvmcs(vvmcs, CR4_READ_SHADOW); - hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0), 1); - hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4), 1); - hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3), 1); + nvcpu->guest_cr[0] = get_vvmcs(v, CR0_READ_SHADOW); + nvcpu->guest_cr[4] = get_vvmcs(v, CR4_READ_SHADOW); + hvm_set_cr0(get_vvmcs(v, GUEST_CR0), 1); + hvm_set_cr4(get_vvmcs(v, GUEST_CR4), 1); + hvm_set_cr3(get_vvmcs(v, GUEST_CR3), 1); - control = __get_vvmcs(vvmcs, VM_ENTRY_CONTROLS); + control = get_vvmcs(v, VM_ENTRY_CONTROLS); if ( control & VM_ENTRY_LOAD_GUEST_PAT ) - hvm_set_guest_pat(v, __get_vvmcs(vvmcs, GUEST_PAT)); + hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT)); if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL ) hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, - __get_vvmcs(vvmcs, GUEST_PERF_GLOBAL_CTRL), 0); + get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0); 
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0); @@ -1053,14 +1028,14 @@ static void load_shadow_guest_state(struct vcpu *v) * guest host mask to 0xffffffff in shadow VMCS (follow the host L1 VMCS), * then calculate the corresponding read shadow separately for CR0 and CR4. */ - cr_gh_mask = __get_vvmcs(vvmcs, CR0_GUEST_HOST_MASK); - cr_read_shadow = (__get_vvmcs(vvmcs, GUEST_CR0) & ~cr_gh_mask) | - (__get_vvmcs(vvmcs, CR0_READ_SHADOW) & cr_gh_mask); + cr_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK); + cr_read_shadow = (get_vvmcs(v, GUEST_CR0) & ~cr_gh_mask) | + (get_vvmcs(v, CR0_READ_SHADOW) & cr_gh_mask); __vmwrite(CR0_READ_SHADOW, cr_read_shadow); - cr_gh_mask = __get_vvmcs(vvmcs, CR4_GUEST_HOST_MASK); - cr_read_shadow = (__get_vvmcs(vvmcs, GUEST_CR4) & ~cr_gh_mask) | - (__get_vvmcs(vvmcs, CR4_READ_SHADOW) & cr_gh_mask); + cr_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK); + cr_read_shadow = (get_vvmcs(v, GUEST_CR4) & ~cr_gh_mask) | + (get_vvmcs(v, CR4_READ_SHADOW) & cr_gh_mask); __vmwrite(CR4_READ_SHADOW, cr_read_shadow); /* TODO: CR3 target control */ @@ -1084,11 +1059,11 @@ static uint64_t get_host_eptp(struct vcpu *v) return ept_get_eptp(ept_data); } -static bool_t nvmx_vpid_enabled(struct nestedvcpu *nvcpu) +static bool_t nvmx_vpid_enabled(const struct vcpu *v) { uint32_t second_cntl; - second_cntl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL); + second_cntl = get_vvmcs(v, SECONDARY_VM_EXEC_CONTROL); if ( second_cntl & SECONDARY_EXEC_ENABLE_VPID ) return 1; return 0; @@ -1099,32 +1074,38 @@ static void nvmx_set_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs) unsigned long vvmcs_mfn = domain_page_map_to_mfn(vvmcs); paddr_t vvmcs_maddr = vvmcs_mfn << PAGE_SHIFT; - __vmpclear(vvmcs_maddr); - vvmcs->vmcs_revision_id |= VMCS_RID_TYPE_MASK; + if ( cpu_has_vmx_vmcs_shadowing ) + { + __vmpclear(vvmcs_maddr); + vvmcs->vmcs_revision_id |= VMCS_RID_TYPE_MASK; + __vmwrite(VMCS_LINK_POINTER, vvmcs_maddr); + __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmread_bitmap)); + __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmwrite_bitmap)); + } v->arch.hvm_vmx.vmcs_shadow_maddr = vvmcs_maddr; - __vmwrite(VMCS_LINK_POINTER, vvmcs_maddr); - __vmwrite(VMREAD_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmread_bitmap)); - __vmwrite(VMWRITE_BITMAP, page_to_maddr(v->arch.hvm_vmx.vmwrite_bitmap)); } static void nvmx_clear_vmcs_pointer(struct vcpu *v, struct vmcs_struct *vvmcs) { - unsigned long vvmcs_mfn = domain_page_map_to_mfn(vvmcs); - paddr_t vvmcs_maddr = vvmcs_mfn << PAGE_SHIFT; - - __vmpclear(vvmcs_maddr); - vvmcs->vmcs_revision_id &= ~VMCS_RID_TYPE_MASK; v->arch.hvm_vmx.vmcs_shadow_maddr = 0; - __vmwrite(VMCS_LINK_POINTER, ~0ul); - __vmwrite(VMREAD_BITMAP, 0); - __vmwrite(VMWRITE_BITMAP, 0); + + if ( cpu_has_vmx_vmcs_shadowing ) + { + unsigned long vvmcs_mfn = domain_page_map_to_mfn(vvmcs); + paddr_t vvmcs_maddr = vvmcs_mfn << PAGE_SHIFT; + + __vmpclear(vvmcs_maddr); + vvmcs->vmcs_revision_id &= ~VMCS_RID_TYPE_MASK; + __vmwrite(VMCS_LINK_POINTER, ~0ul); + __vmwrite(VMREAD_BITMAP, 0); + __vmwrite(VMWRITE_BITMAP, 0); + } } static void virtual_vmentry(struct cpu_user_regs *regs) { struct vcpu *v = current; struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - void *vvmcs = nvcpu->nv_vvmcx; unsigned long lm_l1, lm_l2; vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n2vmcx_pa); @@ -1143,8 +1124,7 @@ static void virtual_vmentry(struct cpu_user_regs *regs) * L1 exit_controls */ lm_l1 = !!hvm_long_mode_enabled(v); - lm_l2 = !!(__get_vvmcs(vvmcs, VM_ENTRY_CONTROLS) & 
- VM_ENTRY_IA32E_MODE); + lm_l2 = !!(get_vvmcs(v, VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE); if ( lm_l2 ) v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME; @@ -1161,9 +1141,9 @@ static void virtual_vmentry(struct cpu_user_regs *regs) !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) ) vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields); - regs->eip = __get_vvmcs(vvmcs, GUEST_RIP); - regs->esp = __get_vvmcs(vvmcs, GUEST_RSP); - regs->eflags = __get_vvmcs(vvmcs, GUEST_RFLAGS); + regs->eip = get_vvmcs(v, GUEST_RIP); + regs->esp = get_vvmcs(v, GUEST_RSP); + regs->eflags = get_vvmcs(v, GUEST_RFLAGS); /* updating host cr0 to sync TS bit */ __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0); @@ -1175,10 +1155,10 @@ static void virtual_vmentry(struct cpu_user_regs *regs) __vmwrite(EPT_POINTER, get_host_eptp(v)); /* nested VPID support! */ - if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(nvcpu) ) + if ( cpu_has_vmx_vpid && nvmx_vpid_enabled(v) ) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); - uint32_t new_vpid = __get_vvmcs(vvmcs, VIRTUAL_PROCESSOR_ID); + uint32_t new_vpid = get_vvmcs(v, VIRTUAL_PROCESSOR_ID); if ( nvmx->guest_vpid != new_vpid ) { @@ -1191,34 +1171,29 @@ static void virtual_vmentry(struct cpu_user_regs *regs) static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - void *vvmcs = nvcpu->nv_vvmcx; - /* copy shadow vmcs.gstate back to vvmcs.gstate */ shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_gstate_field), vmcs_gstate_field); /* RIP, RSP are in user regs */ - __set_vvmcs(vvmcs, GUEST_RIP, regs->eip); - __set_vvmcs(vvmcs, GUEST_RSP, regs->esp); + set_vvmcs(v, GUEST_RIP, regs->eip); + set_vvmcs(v, GUEST_RSP, regs->esp); /* CR3 sync if exec doesn't want cr3 load exiting: i.e. nested EPT */ if ( !(__n2_exec_control(v) & CPU_BASED_CR3_LOAD_EXITING) ) - shadow_to_vvmcs(vvmcs, GUEST_CR3); + shadow_to_vvmcs(v, GUEST_CR3); } static void sync_vvmcs_ro(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct nestedvmx *nvmx = &vcpu_2_nvmx(v); - void *vvmcs = nvcpu->nv_vvmcx; shadow_to_vvmcs_bulk(v, ARRAY_SIZE(vmcs_ro_field), vmcs_ro_field); /* Adjust exit_reason/exit_qualifciation for violation case */ - if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION ) + if ( get_vvmcs(v, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION ) { - __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept.exit_qual); - __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept.exit_reason); + set_vvmcs(v, EXIT_QUALIFICATION, nvmx->ept.exit_qual); + set_vvmcs(v, VM_EXIT_REASON, nvmx->ept.exit_reason); } } @@ -1226,34 +1201,32 @@ static void load_vvmcs_host_state(struct vcpu *v) { int i; u64 r; - void *vvmcs = vcpu_nestedhvm(v).nv_vvmcx; u32 control; for ( i = 0; i < ARRAY_SIZE(vmcs_h2g_field); i++ ) { - r = __get_vvmcs(vvmcs, vmcs_h2g_field[i].host_field); + r = get_vvmcs(v, vmcs_h2g_field[i].host_field); __vmwrite(vmcs_h2g_field[i].guest_field, r); } - hvm_set_cr0(__get_vvmcs(vvmcs, HOST_CR0), 1); - hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4), 1); - hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3), 1); + hvm_set_cr0(get_vvmcs(v, HOST_CR0), 1); + hvm_set_cr4(get_vvmcs(v, HOST_CR4), 1); + hvm_set_cr3(get_vvmcs(v, HOST_CR3), 1); - control = __get_vvmcs(vvmcs, VM_EXIT_CONTROLS); + control = get_vvmcs(v, VM_EXIT_CONTROLS); if ( control & VM_EXIT_LOAD_HOST_PAT ) - hvm_set_guest_pat(v, __get_vvmcs(vvmcs, HOST_PAT)); + hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT)); if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL ) 
hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL, - __get_vvmcs(vvmcs, HOST_PERF_GLOBAL_CTRL), 1); + get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1); hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0); - __set_vvmcs(vvmcs, VM_ENTRY_INTR_INFO, 0); + set_vvmcs(v, VM_ENTRY_INTR_INFO, 0); } static void sync_exception_state(struct vcpu *v) { - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct nestedvmx *nvmx = &vcpu_2_nvmx(v); if ( !(nvmx->intr.intr_info & INTR_INFO_VALID_MASK) ) @@ -1263,10 +1236,9 @@ static void sync_exception_state(struct vcpu *v) { case X86_EVENTTYPE_EXT_INTR: /* rename exit_reason to EXTERNAL_INTERRUPT */ - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_REASON, - EXIT_REASON_EXTERNAL_INTERRUPT); - __set_vvmcs(nvcpu->nv_vvmcx, EXIT_QUALIFICATION, 0); - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO, + set_vvmcs(v, VM_EXIT_REASON, EXIT_REASON_EXTERNAL_INTERRUPT); + set_vvmcs(v, EXIT_QUALIFICATION, 0); + set_vvmcs(v, VM_EXIT_INTR_INFO, nvmx->intr.intr_info); break; @@ -1274,17 +1246,13 @@ static void sync_exception_state(struct vcpu *v) case X86_EVENTTYPE_SW_INTERRUPT: case X86_EVENTTYPE_SW_EXCEPTION: /* throw to L1 */ - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO, - nvmx->intr.intr_info); - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_ERROR_CODE, - nvmx->intr.error_code); + set_vvmcs(v, VM_EXIT_INTR_INFO, nvmx->intr.intr_info); + set_vvmcs(v, VM_EXIT_INTR_ERROR_CODE, nvmx->intr.error_code); break; case X86_EVENTTYPE_NMI: - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_REASON, - EXIT_REASON_EXCEPTION_NMI); - __set_vvmcs(nvcpu->nv_vvmcx, EXIT_QUALIFICATION, 0); - __set_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO, - nvmx->intr.intr_info); + set_vvmcs(v, VM_EXIT_REASON, EXIT_REASON_EXCEPTION_NMI); + set_vvmcs(v, EXIT_QUALIFICATION, 0); + set_vvmcs(v, VM_EXIT_INTR_INFO, nvmx->intr.intr_info); break; default: gdprintk(XENLOG_ERR, "Exception state %lx not handled\n", @@ -1296,9 +1264,8 @@ static void sync_exception_state(struct vcpu *v) static void nvmx_update_apicv(struct vcpu *v) { struct nestedvmx *nvmx = &vcpu_2_nvmx(v); - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); - unsigned long reason = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_REASON); - uint32_t intr_info = __get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_INTR_INFO); + unsigned long reason = get_vvmcs(v, VM_EXIT_REASON); + uint32_t intr_info = get_vvmcs(v, VM_EXIT_INTR_INFO); if ( reason == EXIT_REASON_EXTERNAL_INTERRUPT && nvmx->intr.source == hvm_intsrc_lapic && @@ -1344,8 +1311,7 @@ static void virtual_vmexit(struct cpu_user_regs *regs) nvcpu->nv_vmswitch_in_progress = 1; lm_l2 = !!hvm_long_mode_enabled(v); - lm_l1 = !!(__get_vvmcs(nvcpu->nv_vvmcx, VM_EXIT_CONTROLS) & - VM_EXIT_IA32E_MODE); + lm_l1 = !!(get_vvmcs(v, VM_EXIT_CONTROLS) & VM_EXIT_IA32E_MODE); if ( lm_l1 ) v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME; @@ -1361,8 +1327,8 @@ static void virtual_vmexit(struct cpu_user_regs *regs) if ( lm_l1 != lm_l2 ) paging_update_paging_modes(v); - regs->eip = __get_vvmcs(nvcpu->nv_vvmcx, HOST_RIP); - regs->esp = __get_vvmcs(nvcpu->nv_vvmcx, HOST_RSP); + regs->eip = get_vvmcs(v, HOST_RIP); + regs->esp = get_vvmcs(v, HOST_RSP); /* VM exit clears all bits except bit 1 */ regs->eflags = 0x2; @@ -1539,7 +1505,6 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs) { bool_t launched; struct vcpu *v = current; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct nestedvmx *nvmx = &vcpu_2_nvmx(v); int rc = vmx_inst_check_privilege(regs, 0); @@ -1553,7 +1518,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs) } launched = 
vvmcs_launched(&nvmx->launched_list, - domain_page_map_to_mfn(nvcpu->nv_vvmcx)); + PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr)); if ( !launched ) { vmreturn (regs, VMFAIL_VALID); return X86EMUL_OKAY; @@ -1565,7 +1530,6 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs) { bool_t launched; struct vcpu *v = current; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); struct nestedvmx *nvmx = &vcpu_2_nvmx(v); int rc = vmx_inst_check_privilege(regs, 0); @@ -1579,7 +1543,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs) } launched = vvmcs_launched(&nvmx->launched_list, - domain_page_map_to_mfn(nvcpu->nv_vvmcx)); + PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr)); if ( launched ) { vmreturn (regs, VMFAIL_VALID); return X86EMUL_OKAY; @@ -1589,7 +1553,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs) if ( rc == X86EMUL_OKAY ) { if ( set_vvmcs_launched(&nvmx->launched_list, - domain_page_map_to_mfn(nvcpu->nv_vvmcx)) < 0 ) + PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr)) < 0 ) return X86EMUL_UNHANDLEABLE; } } @@ -1644,8 +1608,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs) } } - if ( cpu_has_vmx_vmcs_shadowing ) - nvmx_set_vmcs_pointer(v, nvcpu->nv_vvmcx); + nvmx_set_vmcs_pointer(v, nvcpu->nv_vvmcx); vmreturn(regs, VMSUCCEED); @@ -1694,10 +1657,10 @@ int nvmx_handle_vmclear(struct cpu_user_regs *regs) rc = VMFAIL_INVALID; else if ( gpa == nvcpu->nv_vvmcxaddr ) { - if ( cpu_has_vmx_vmcs_shadowing ) - nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx); - clear_vvmcs_launched(&nvmx->launched_list, - domain_page_map_to_mfn(nvcpu->nv_vvmcx)); + unsigned long mfn = PFN_DOWN(v->arch.hvm_vmx.vmcs_shadow_maddr); + + nvmx_clear_vmcs_pointer(v, nvcpu->nv_vvmcx); + clear_vvmcs_launched(&nvmx->launched_list, mfn); nvmx_purge_vvmcs(v); } else @@ -1726,7 +1689,6 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs) { struct vcpu *v = current; struct vmx_inst_decoded decode; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); u64 value = 0; int rc; @@ -1734,7 +1696,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs) if ( rc != X86EMUL_OKAY ) return rc; - value = __get_vvmcs(nvcpu->nv_vvmcx, reg_read(regs, decode.reg2)); + value = get_vvmcs(v, reg_read(regs, decode.reg2)); switch ( decode.type ) { case VMX_INST_MEMREG_TYPE_MEMORY: @@ -1755,7 +1717,6 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs) { struct vcpu *v = current; struct vmx_inst_decoded decode; - struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v); unsigned long operand; u64 vmcs_encoding; bool_t okay = 1; @@ -1765,7 +1726,7 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs) return X86EMUL_EXCEPTION; vmcs_encoding = reg_read(regs, decode.reg2); - __set_vvmcs(nvcpu->nv_vvmcx, vmcs_encoding, operand); + set_vvmcs(v, vmcs_encoding, operand); switch ( vmcs_encoding & ~VMCS_HIGH(0) ) { @@ -2197,7 +2158,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, } else if ( (intr_info & valid_mask) == valid_mask ) { - exec_bitmap =__get_vvmcs(nvcpu->nv_vvmcx, EXCEPTION_BITMAP); + exec_bitmap = get_vvmcs(v, EXCEPTION_BITMAP); if ( exec_bitmap & (1 << vector) ) nvcpu->nv_vmexit_pending = 1; @@ -2316,8 +2277,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, * special handler is needed if L1 doesn't intercept rdtsc, * avoiding changing guest_tsc and messing up timekeeping in L1 */ - tsc = hvm_get_guest_tsc(v); - tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET); + tsc = hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET); regs->eax = (uint32_t)tsc; regs->edx = (uint32_t)(tsc >> 32); update_guest_eip(); @@ -2406,7 +2366,7 @@ int 
nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, val = *reg; if ( cr == 0 ) { - u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK); + u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK); __vmread(CR0_READ_SHADOW, &old_val); changed_bits = old_val ^ val; @@ -2414,14 +2374,15 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, nvcpu->nv_vmexit_pending = 1; else { - u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0); - __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, - (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); + u64 guest_cr0 = get_vvmcs(v, GUEST_CR0); + + set_vvmcs(v, GUEST_CR0, + (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); } } else if ( cr == 4 ) { - u64 cr4_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR4_GUEST_HOST_MASK); + u64 cr4_gh_mask = get_vvmcs(v, CR4_GUEST_HOST_MASK); __vmread(CR4_READ_SHADOW, &old_val); changed_bits = old_val ^ val; @@ -2429,9 +2390,10 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, nvcpu->nv_vmexit_pending = 1; else { - u64 guest_cr4 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR4); - __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR4, - (guest_cr4 & cr4_gh_mask) | (val & ~cr4_gh_mask)); + u64 guest_cr4 = get_vvmcs(v, GUEST_CR4); + + set_vvmcs(v, GUEST_CR4, + (guest_cr4 & cr4_gh_mask) | (val & ~cr4_gh_mask)); } } else @@ -2440,20 +2402,21 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, } case VMX_CONTROL_REG_ACCESS_TYPE_CLTS: { - u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK); + u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK); if ( cr0_gh_mask & X86_CR0_TS ) nvcpu->nv_vmexit_pending = 1; else { - u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0); - __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, (guest_cr0 & ~X86_CR0_TS)); + u64 guest_cr0 = get_vvmcs(v, GUEST_CR0); + + set_vvmcs(v, GUEST_CR0, (guest_cr0 & ~X86_CR0_TS)); } break; } case VMX_CONTROL_REG_ACCESS_TYPE_LMSW: { - u64 cr0_gh_mask = __get_vvmcs(nvcpu->nv_vvmcx, CR0_GUEST_HOST_MASK); + u64 cr0_gh_mask = get_vvmcs(v, CR0_GUEST_HOST_MASK); __vmread(CR0_READ_SHADOW, &old_val); old_val &= X86_CR0_PE|X86_CR0_MP|X86_CR0_EM|X86_CR0_TS; @@ -2464,8 +2427,9 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, nvcpu->nv_vmexit_pending = 1; else { - u64 guest_cr0 = __get_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0); - __set_vvmcs(nvcpu->nv_vvmcx, GUEST_CR0, (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); + u64 guest_cr0 = get_vvmcs(v, GUEST_CR0); + + set_vvmcs(v, GUEST_CR0, (guest_cr0 & cr0_gh_mask) | (val & ~cr0_gh_mask)); } break; } @@ -2517,7 +2481,7 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr) if ( !nestedhvm_vmswitch_in_progress(v) ) { unsigned long virtual_cr_mask = - __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, mask_field); + get_vvmcs(v, mask_field); /* * We get here when L2 changed cr in a way that did not change @@ -2529,7 +2493,7 @@ void nvmx_set_cr_read_shadow(struct vcpu *v, unsigned int cr) */ v->arch.hvm_vcpu.guest_cr[cr] &= ~virtual_cr_mask; v->arch.hvm_vcpu.guest_cr[cr] |= virtual_cr_mask & - __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, cr_field); + get_vvmcs(v, cr_field); } /* nvcpu.guest_cr is what L2 write to cr actually. */ diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index ad9798c..f873523 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -95,7 +95,7 @@ struct arch_vmx_struct { /* Physical address of VMCS. */ paddr_t vmcs_pa; /* VMCS shadow machine address. 
*/ - paddr_t vmcs_shadow_maddr; + paddr_t vmcs_shadow_maddr; /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */ spinlock_t vmcs_lock; @@ -492,10 +492,10 @@ void vmx_vmcs_switch(paddr_t from, paddr_t to); void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector); void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector); int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type); -void virtual_vmcs_enter(void *vvmcs); -void virtual_vmcs_exit(void *vvmcs); -u64 virtual_vmcs_vmread(void *vvmcs, u32 vmcs_encoding); -void virtual_vmcs_vmwrite(void *vvmcs, u32 vmcs_encoding, u64 val); +void virtual_vmcs_enter(const struct vcpu *); +void virtual_vmcs_exit(const struct vcpu *); +u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding); +void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val); static inline int vmx_add_guest_msr(u32 msr) { diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h index 60fdc21..aca8b4b 100644 --- a/xen/include/asm-x86/hvm/vmx/vvmx.h +++ b/xen/include/asm-x86/hvm/vmx/vvmx.h @@ -181,18 +181,20 @@ enum vvmcs_encoding_type { VVMCS_TYPE_HSTATE, }; -u64 __get_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding); -u64 __get_vvmcs_real(void *vvmcs, u32 vmcs_encoding); -void __set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val); -void __set_vvmcs_real(void *vvmcs, u32 vmcs_encoding, u64 val); - -#define __get_vvmcs(_vvmcs, _vmcs_encoding) \ - (cpu_has_vmx_vmcs_shadowing ? __get_vvmcs_real(_vvmcs, _vmcs_encoding) \ - : __get_vvmcs_virtual(_vvmcs, _vmcs_encoding)) - -#define __set_vvmcs(_vvmcs, _vmcs_encoding, _val) \ - (cpu_has_vmx_vmcs_shadowing ? __set_vvmcs_real(_vvmcs, _vmcs_encoding, _val) \ - : __set_vvmcs_virtual(_vvmcs, _vmcs_encoding, _val)) +u64 get_vvmcs_virtual(void *vvmcs, u32 encoding); +u64 get_vvmcs_real(const struct vcpu *, u32 encoding); +void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val); +void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val); + +#define get_vvmcs(vcpu, encoding) \ + (cpu_has_vmx_vmcs_shadowing ? \ + get_vvmcs_real(vcpu, encoding) : \ + get_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding)) + +#define set_vvmcs(vcpu, encoding, val) \ + (cpu_has_vmx_vmcs_shadowing ? \ + set_vvmcs_real(vcpu, encoding, val) : \ + set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val)) uint64_t get_shadow_eptp(struct vcpu *v); -- generated by git-patchbot for /home/xen/git/xen.git#master _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
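
For readers skimming the patch, here is a minimal, self-contained sketch of the two ideas it combines: the machine address of the guest's VMCS is latched once (when the vVMCS is mapped) instead of being recomputed with domain_page_map_to_mfn() on every access, and a single get_vvmcs()/set_vvmcs() pair dispatches between the hardware shadow-VMCS path and the software path. This is illustrative only, not Xen code: struct toy_vcpu, the has_vmcs_shadowing flag, latch_vvmcs() and the printf-simulated VMREAD/VMWRITE are stand-ins invented for the example; only the dispatch shape mirrors the new get_vvmcs()/set_vvmcs() macros in vvmx.h.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for cpu_has_vmx_vmcs_shadowing (a hardware capability in Xen). */
static bool has_vmcs_shadowing;

/* Toy per-vCPU state: just the two fields the patch revolves around. */
struct toy_vcpu {
    uint64_t vmcs_shadow_maddr;  /* latched guest-VMCS machine address */
    uint64_t *vvmcx;             /* mapped software copy of the guest VMCS */
};

/* Software path: read/write the mapped vVMCS page directly. */
static uint64_t get_vvmcs_virtual(const uint64_t *vvmcx, unsigned int field)
{
    return vvmcx[field];
}

static void set_vvmcs_virtual(uint64_t *vvmcx, unsigned int field, uint64_t val)
{
    vvmcx[field] = val;
}

/*
 * Hardware path: the real code loads the latched address and then uses
 * VMREAD/VMWRITE (virtual_vmcs_enter/vmread/vmwrite/exit).  Simulated here
 * so the sketch runs anywhere.
 */
static uint64_t get_vvmcs_real(const struct toy_vcpu *v, unsigned int field)
{
    printf("VMREAD  field %#x via VMCS at %#llx\n",
           field, (unsigned long long)v->vmcs_shadow_maddr);
    return v->vvmcx[field];          /* placeholder for the real VMREAD */
}

static void set_vvmcs_real(struct toy_vcpu *v, unsigned int field, uint64_t val)
{
    printf("VMWRITE field %#x via VMCS at %#llx\n",
           field, (unsigned long long)v->vmcs_shadow_maddr);
    v->vvmcx[field] = val;           /* placeholder for the real VMWRITE */
}

/* Same dispatch shape as the new get_vvmcs()/set_vvmcs() macros in vvmx.h. */
#define get_vvmcs(v, field)                                   \
    (has_vmcs_shadowing ? get_vvmcs_real(v, field)            \
                        : get_vvmcs_virtual((v)->vvmcx, field))

#define set_vvmcs(v, field, val)                              \
    (has_vmcs_shadowing ? set_vvmcs_real(v, field, val)       \
                        : set_vvmcs_virtual((v)->vvmcx, field, val))

/*
 * Modeled after nvmx_set_vmcs_pointer(): the machine address is latched
 * unconditionally; only the shadow-VMCS plumbing (VMPCLEAR, link pointer,
 * VMREAD/VMWRITE bitmaps) stays gated on hardware support.
 */
static void latch_vvmcs(struct toy_vcpu *v, uint64_t maddr)
{
    if ( has_vmcs_shadowing )
    {
        /* real code: __vmpclear(maddr), program VMCS_LINK_POINTER, bitmaps */
    }
    v->vmcs_shadow_maddr = maddr;
}

int main(void)
{
    uint64_t page[16] = { 0 };
    struct toy_vcpu v = { .vvmcx = page };

    latch_vvmcs(&v, 0x12345000);        /* pretend VMPTRLD emulation */

    set_vvmcs(&v, 3, 0xdeadbeef);       /* software path by default */
    printf("field 3 = %#llx\n", (unsigned long long)get_vvmcs(&v, 3));

    has_vmcs_shadowing = true;          /* flip to the hardware-backed path */
    set_vvmcs(&v, 3, 0xfeedface);
    printf("field 3 = %#llx\n", (unsigned long long)get_vvmcs(&v, 3));
    return 0;
}
```

With the address latched in arch_vmx_struct, virtual_vmcs_enter()/exit() and the vmlaunch/vmresume/vmclear paths can all key off v->arch.hvm_vmx.vmcs_shadow_maddr rather than re-translating the mapped pointer, which is what lets the patch drop the repeated domain_page_map_to_mfn() calls at every call site.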