[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 07/24] PVH xen: vmx related preparatory changes for PVH
>>> On 18.07.13 at 04:32, Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> wrote: > This is another preparatory patch for PVH. In this patch, following > functions are made available for general/public use: > vmx_fpu_enter(), get_instruction_length(), update_guest_eip(), > and vmx_dr_access(). > > There is no functionality change. > > Changes in V2: > - prepend vmx_ to get_instruction_length and update_guest_eip. > - Do not export/use vmr(). > > Changes in V3: > - Do not change emulate_forced_invalid_op() in this patch. > > Changes in V7: > - Drop pv_cpuid going public here. > > Changes in V8: > - Move vmx_fpu_enter prototype from vmcs.h to vmx.h > > Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> > Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx> Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx> > --- > xen/arch/x86/hvm/vmx/vmx.c | 72 +++++++++++++++--------------------- > xen/arch/x86/hvm/vmx/vvmx.c | 2 +- > xen/include/asm-x86/hvm/vmx/vmx.h | 17 ++++++++- > 3 files changed, 47 insertions(+), 44 deletions(-) > > diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c > index d6540e3..195f9ed 100644 > --- a/xen/arch/x86/hvm/vmx/vmx.c > +++ b/xen/arch/x86/hvm/vmx/vmx.c > @@ -577,7 +577,7 @@ static int vmx_load_vmcs_ctxt(struct vcpu *v, struct > hvm_hw_cpu *ctxt) > return 0; > } > > -static void vmx_fpu_enter(struct vcpu *v) > +void vmx_fpu_enter(struct vcpu *v) > { > vcpu_restore_fpu_lazy(v); > v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device); > @@ -1597,24 +1597,12 @@ const struct hvm_function_table * __init > start_vmx(void) > return &vmx_function_table; > } > > -/* > - * Not all cases receive valid value in the VM-exit instruction length field. > - * Callers must know what they're doing! 
> - */ > -static int get_instruction_length(void) > -{ > - int len; > - len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */ > - BUG_ON((len < 1) || (len > 15)); > - return len; > -} > - > -void update_guest_eip(void) > +void vmx_update_guest_eip(void) > { > struct cpu_user_regs *regs = guest_cpu_user_regs(); > unsigned long x; > > - regs->eip += get_instruction_length(); /* Safe: callers audited */ > + regs->eip += vmx_get_instruction_length(); /* Safe: callers audited */ > regs->eflags &= ~X86_EFLAGS_RF; > > x = __vmread(GUEST_INTERRUPTIBILITY_INFO); > @@ -1687,8 +1675,8 @@ static void vmx_do_cpuid(struct cpu_user_regs *regs) > regs->edx = edx; > } > > -static void vmx_dr_access(unsigned long exit_qualification, > - struct cpu_user_regs *regs) > +void vmx_dr_access(unsigned long exit_qualification, > + struct cpu_user_regs *regs) > { > struct vcpu *v = current; > > @@ -2301,7 +2289,7 @@ static int vmx_handle_eoi_write(void) > if ( (((exit_qualification >> 12) & 0xf) == 1) && > ((exit_qualification & 0xfff) == APIC_EOI) ) > { > - update_guest_eip(); /* Safe: APIC data write */ > + vmx_update_guest_eip(); /* Safe: APIC data write */ > vlapic_EOI_set(vcpu_vlapic(current)); > HVMTRACE_0D(VLAPIC); > return 1; > @@ -2514,7 +2502,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > HVMTRACE_1D(TRAP, vector); > if ( v->domain->debugger_attached ) > { > - update_guest_eip(); /* Safe: INT3 */ > + vmx_update_guest_eip(); /* Safe: INT3 */ > current->arch.gdbsx_vcpu_event = TRAP_int3; > domain_pause_for_debugger(); > break; > @@ -2622,7 +2610,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > */ > inst_len = ((source != 3) || /* CALL, IRET, or JMP? */ > (idtv_info & (1u<<10))) /* IntrType > 3? */ > - ? get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0; > + ? 
vmx_get_instruction_length() /* Safe: SDM 3B 23.2.4 */ : 0; > if ( (source == 3) && (idtv_info & INTR_INFO_DELIVER_CODE_MASK) ) > ecode = __vmread(IDT_VECTORING_ERROR_CODE); > regs->eip += inst_len; > @@ -2630,15 +2618,15 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > break; > } > case EXIT_REASON_CPUID: > - update_guest_eip(); /* Safe: CPUID */ > + vmx_update_guest_eip(); /* Safe: CPUID */ > vmx_do_cpuid(regs); > break; > case EXIT_REASON_HLT: > - update_guest_eip(); /* Safe: HLT */ > + vmx_update_guest_eip(); /* Safe: HLT */ > hvm_hlt(regs->eflags); > break; > case EXIT_REASON_INVLPG: > - update_guest_eip(); /* Safe: INVLPG */ > + vmx_update_guest_eip(); /* Safe: INVLPG */ > exit_qualification = __vmread(EXIT_QUALIFICATION); > vmx_invlpg_intercept(exit_qualification); > break; > @@ -2646,7 +2634,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > regs->ecx = hvm_msr_tsc_aux(v); > /* fall through */ > case EXIT_REASON_RDTSC: > - update_guest_eip(); /* Safe: RDTSC, RDTSCP */ > + vmx_update_guest_eip(); /* Safe: RDTSC, RDTSCP */ > hvm_rdtsc_intercept(regs); > break; > case EXIT_REASON_VMCALL: > @@ -2656,7 +2644,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > rc = hvm_do_hypercall(regs); > if ( rc != HVM_HCALL_preempted ) > { > - update_guest_eip(); /* Safe: VMCALL */ > + vmx_update_guest_eip(); /* Safe: VMCALL */ > if ( rc == HVM_HCALL_invalidate ) > send_invalidate_req(); > } > @@ -2666,7 +2654,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > { > exit_qualification = __vmread(EXIT_QUALIFICATION); > if ( vmx_cr_access(exit_qualification) == X86EMUL_OKAY ) > - update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */ > + vmx_update_guest_eip(); /* Safe: MOV Cn, LMSW, CLTS */ > break; > } > case EXIT_REASON_DR_ACCESS: > @@ -2680,7 +2668,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > { > regs->eax = (uint32_t)msr_content; > regs->edx = (uint32_t)(msr_content >> 32); > - update_guest_eip(); /* Safe: RDMSR */ > + 
vmx_update_guest_eip(); /* Safe: RDMSR */ > } > break; > } > @@ -2689,63 +2677,63 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > uint64_t msr_content; > msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax; > if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY > ) > - update_guest_eip(); /* Safe: WRMSR */ > + vmx_update_guest_eip(); /* Safe: WRMSR */ > break; > } > > case EXIT_REASON_VMXOFF: > if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMXON: > if ( nvmx_handle_vmxon(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMCLEAR: > if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMPTRLD: > if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMPTRST: > if ( nvmx_handle_vmptrst(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMREAD: > if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMWRITE: > if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMLAUNCH: > if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_VMRESUME: > if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_INVEPT: > if ( nvmx_handle_invept(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_INVVPID: > if ( nvmx_handle_invvpid(regs) == X86EMUL_OKAY ) > - update_guest_eip(); > + vmx_update_guest_eip(); > break; > > case EXIT_REASON_MWAIT_INSTRUCTION: 
> @@ -2793,14 +2781,14 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > int bytes = (exit_qualification & 0x07) + 1; > int dir = (exit_qualification & 0x08) ? IOREQ_READ : > IOREQ_WRITE; > if ( handle_pio(port, bytes, dir) ) > - update_guest_eip(); /* Safe: IN, OUT */ > + vmx_update_guest_eip(); /* Safe: IN, OUT */ > } > break; > > case EXIT_REASON_INVD: > case EXIT_REASON_WBINVD: > { > - update_guest_eip(); /* Safe: INVD, WBINVD */ > + vmx_update_guest_eip(); /* Safe: INVD, WBINVD */ > vmx_wbinvd_intercept(); > break; > } > @@ -2832,7 +2820,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs) > case EXIT_REASON_XSETBV: > if ( hvm_handle_xsetbv(regs->ecx, > (regs->rdx << 32) | regs->_eax) == 0 ) > - update_guest_eip(); /* Safe: XSETBV */ > + vmx_update_guest_eip(); /* Safe: XSETBV */ > break; > > case EXIT_REASON_APIC_WRITE: > diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c > index 5dfbc54..82be4cc 100644 > --- a/xen/arch/x86/hvm/vmx/vvmx.c > +++ b/xen/arch/x86/hvm/vmx/vvmx.c > @@ -2139,7 +2139,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs, > tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET); > regs->eax = (uint32_t)tsc; > regs->edx = (uint32_t)(tsc >> 32); > - update_guest_eip(); > + vmx_update_guest_eip(); > > return 1; > } > diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h > b/xen/include/asm-x86/hvm/vmx/vmx.h > index c33b9f9..c21a303 100644 > --- a/xen/include/asm-x86/hvm/vmx/vmx.h > +++ b/xen/include/asm-x86/hvm/vmx/vmx.h > @@ -446,6 +446,18 @@ static inline int __vmxon(u64 addr) > return rc; > } > > +/* > + * Not all cases receive valid value in the VM-exit instruction length > field. > + * Callers must know what they're doing! 
> + */ > +static inline int vmx_get_instruction_length(void) > +{ > + int len; > + len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */ > + BUG_ON((len < 1) || (len > 15)); > + return len; > +} > + > void vmx_get_segment_register(struct vcpu *, enum x86_segment, > struct segment_register *); > void vmx_inject_extint(int trap); > @@ -457,7 +469,10 @@ void ept_p2m_uninit(struct p2m_domain *p2m); > void ept_walk_table(struct domain *d, unsigned long gfn); > void setup_ept_dump(void); > > -void update_guest_eip(void); > +void vmx_update_guest_eip(void); > +void vmx_dr_access(unsigned long exit_qualification, > + struct cpu_user_regs *regs); > +void vmx_fpu_enter(struct vcpu *v); > > int alloc_p2m_hap_data(struct p2m_domain *p2m); > void free_p2m_hap_data(struct p2m_domain *p2m); > -- > 1.7.2.3 > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@xxxxxxxxxxxxx > http://lists.xen.org/xen-devel _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |