[Xen-devel] [PATCH 8/9] x86/hvm: Add hvm_copy_{to,from}_guest_virt() helpers
hvm_copy_{to,from}_guest_virt() copy data to and from a guest, performing
segmentation and paging checks on the provided seg:offset virtual address.

Signed-off-by: Euan Harris <euan.harris@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            | 57 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/support.h | 12 +++++++++
 2 files changed, 69 insertions(+)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 205b4cb685..5d2bdd6b2b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3312,6 +3312,63 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
+static int _hvm_copy_guest_virt(
+    enum x86_segment seg, unsigned long offset, void *buf, unsigned int bytes,
+    uint32_t pfec, unsigned int flags)
+{
+    struct vcpu *curr = current;
+    struct segment_register sreg, cs;
+    enum hvm_translation_result res;
+    pagefault_info_t pfinfo;
+    unsigned long linear;
+
+    ASSERT(is_x86_user_segment(seg));
+
+    hvm_get_segment_register(curr, seg, &sreg);
+    hvm_get_segment_register(curr, x86_seg_cs, &cs);
+
+    if ( !hvm_virtual_to_linear_addr(
+             seg, &sreg, offset, bytes,
+             flags & HVMCOPY_to_guest ? hvm_access_write : hvm_access_read,
+             &cs, &linear) )
+    {
+        hvm_inject_hw_exception(
+            (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault, 0);
+        return X86EMUL_EXCEPTION;
+    }
+
+    if ( flags & HVMCOPY_to_guest )
+        res = hvm_copy_to_guest_linear(linear, buf, bytes, pfec, &pfinfo);
+    else
+        res = hvm_copy_from_guest_linear(buf, linear, bytes, pfec, &pfinfo);
+
+    if ( res == HVMTRANS_bad_linear_to_gfn )
+    {
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+        return X86EMUL_EXCEPTION;
+    }
+    else if ( res )
+        return X86EMUL_RETRY;
+
+    return X86EMUL_OKAY;
+}
+
+int hvm_copy_to_guest_virt(
+    enum x86_segment seg, unsigned long offset, void *buf, unsigned int bytes,
+    uint32_t pfec)
+{
+    return _hvm_copy_guest_virt(seg, offset, buf, bytes, pfec,
+                                HVMCOPY_to_guest);
+}
+
+int hvm_copy_from_guest_virt(
+    void *buf, enum x86_segment seg, unsigned long offset, unsigned int bytes,
+    uint32_t pfec)
+{
+    return _hvm_copy_guest_virt(seg, offset, buf, bytes, pfec,
+                                HVMCOPY_from_guest);
+}
+
 bool hvm_check_cpuid_faulting(struct vcpu *v)
 {
     const struct msr_vcpu_policy *vp = v->arch.msr;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index d784fc1856..9af2ae77b7 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -115,6 +115,18 @@ enum hvm_translation_result hvm_translate_get_page(
     pagefault_info_t *pfinfo, struct page_info **page_p,
     gfn_t *gfn_p, p2m_type_t *p2mt_p);
 
+/*
+ * Copy data to and from a guest, performing segmentation and paging checks
+ * on the provided seg:offset virtual address.
+ * Returns X86EMUL_* and raises exceptions with the current vcpu.
+ */
+int hvm_copy_to_guest_virt(
+    enum x86_segment seg, unsigned long offset, void *buf, unsigned int bytes,
+    uint32_t pfec);
+int hvm_copy_from_guest_virt(
+    void *buf, enum x86_segment seg, unsigned long offset, unsigned int bytes,
+    uint32_t pfec);
+
 #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
 #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
 int hvm_hypercall(struct cpu_user_regs *regs);
-- 
2.13.6

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
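
For illustration only, a hypothetical caller of the new helper might look
like the sketch below; it is not part of this patch, and the wrapper name,
the choice of x86_seg_ds and the use of PFEC_page_present are assumptions
about how an emulation path could consume the interface.

/*
 * Hypothetical example (not from this series): read an emulated
 * instruction's memory operand through %ds:offset, with segmentation
 * and paging checks done by hvm_copy_from_guest_virt().
 */
static int read_guest_operand(unsigned long offset, void *val,
                              unsigned int bytes)
{
    /*
     * pfec describes the access being emulated; a plain supervisor read
     * is assumed here.  A real caller would add e.g. PFEC_user_mode to
     * match the access it is emulating.
     */
    int rc = hvm_copy_from_guest_virt(val, x86_seg_ds, offset, bytes,
                                      PFEC_page_present);

    /*
     * X86EMUL_EXCEPTION: a #GP/#SS or #PF has already been injected into
     * the current vcpu, so the caller just unwinds.
     * X86EMUL_RETRY: the translation could not complete; re-execute later.
     * X86EMUL_OKAY: 'bytes' bytes were copied into val.
     */
    return rc;
}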