[Xen-changelog] [xen-unstable] add nestedhvm function hooks for svm/vmx specific code
# HG changeset patch
# User cegger
# Date 1298892104 -3600
# Node ID 5a137177a6dfc8107481aed16e0876f86b4771ac
# Parent 029f0a5301f05afc55c85d91a4ce21e35b911772
add nestedhvm function hooks for svm/vmx specific code

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Acked-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---

diff -r 029f0a5301f0 -r 5a137177a6df xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Feb 28 12:21:41 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Mon Feb 28 12:21:44 2011 +0100
@@ -3889,6 +3889,89 @@
 }
 #endif /* __x86_64__ */
 
+int nhvm_vcpu_initialise(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_initialise)
+        return hvm_funcs.nhvm_vcpu_initialise(v);
+    return -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_destroy(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_destroy)
+        return hvm_funcs.nhvm_vcpu_destroy(v);
+    return -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_reset(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_reset)
+        return hvm_funcs.nhvm_vcpu_reset(v);
+    return -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
+{
+    if (hvm_funcs.nhvm_vcpu_hostrestore)
+        return hvm_funcs.nhvm_vcpu_hostrestore(v, regs);
+    return -EOPNOTSUPP;
+}
+
+int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
+                     uint64_t exitcode)
+{
+    if (hvm_funcs.nhvm_vcpu_vmexit)
+        return hvm_funcs.nhvm_vcpu_vmexit(v, regs, exitcode);
+    return -EOPNOTSUPP;
+}
+
+int
+nhvm_vcpu_vmexit_trap(struct vcpu *v, unsigned int trapnr,
+                      int errcode, unsigned long cr2)
+{
+    return hvm_funcs.nhvm_vcpu_vmexit_trap(v, trapnr, errcode, cr2);
+}
+
+uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_guestcr3)
+        return hvm_funcs.nhvm_vcpu_guestcr3(v);
+    return -EOPNOTSUPP;
+}
+
+uint64_t nhvm_vcpu_hostcr3(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_hostcr3)
+        return hvm_funcs.nhvm_vcpu_hostcr3(v);
+    return -EOPNOTSUPP;
+}
+
+uint32_t nhvm_vcpu_asid(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vcpu_asid)
+        return hvm_funcs.nhvm_vcpu_asid(v);
+    return -EOPNOTSUPP;
+}
+
+int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, unsigned int trap)
+{
+    if (hvm_funcs.nhvm_vmcx_guest_intercepts_trap)
+        return hvm_funcs.nhvm_vmcx_guest_intercepts_trap(v, trap);
+    return -EOPNOTSUPP;
+}
+
+bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
+{
+    if (hvm_funcs.nhvm_vmcx_hap_enabled)
+        return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+    return -EOPNOTSUPP;
+}
+
+enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
+{
+    return hvm_funcs.nhvm_intr_blocked(v);
+}
+
 /*
  * Local variables:
  * mode: C
diff -r 029f0a5301f0 -r 5a137177a6df xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Feb 28 12:21:41 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Feb 28 12:21:44 2011 +0100
@@ -145,6 +145,27 @@
     void (*set_uc_mode)(struct vcpu *v);
     void (*set_info_guest)(struct vcpu *v);
     void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
+
+    /* Nested HVM */
+    int (*nhvm_vcpu_initialise)(struct vcpu *v);
+    int (*nhvm_vcpu_destroy)(struct vcpu *v);
+    int (*nhvm_vcpu_reset)(struct vcpu *v);
+    int (*nhvm_vcpu_hostrestore)(struct vcpu *v,
+                                 struct cpu_user_regs *regs);
+    int (*nhvm_vcpu_vmexit)(struct vcpu *v, struct cpu_user_regs *regs,
+                            uint64_t exitcode);
+    int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v,
+                                 unsigned int trapnr,
+                                 int errcode,
+                                 unsigned long cr2);
+    uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
+    uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
+    uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
+    int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v, unsigned int trapnr);
+
+    bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
+
+    enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -378,7 +399,6 @@
 void hvm_memory_event_cr0(unsigned long value, unsigned long old);
 void hvm_memory_event_cr3(unsigned long value, unsigned long old);
 void hvm_memory_event_cr4(unsigned long value, unsigned long old);
-
 /* Called for current VCPU on int3: returns -1 if no listener */
 int hvm_memory_event_int3(unsigned long gla);
 #else
@@ -392,4 +412,44 @@
 { return 0; }
 #endif
 
+/*
+ * Nested HVM
+ */
+
+/* Initialize vcpu's struct nestedhvm */
+int nhvm_vcpu_initialise(struct vcpu *v);
+/* Destroy and free vcpu's struct nestedhvm */
+int nhvm_vcpu_destroy(struct vcpu *v);
+/* Reset vcpu's state when l1 guest disables nested virtualization */
+int nhvm_vcpu_reset(struct vcpu *v);
+/* Restores l1 guest state */
+int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs);
+/* Fill l1 guest's VMCB/VMCS with data provided by generic exit codes
+ * (do conversion as needed), other misc SVM/VMX specific tweaks to make
+ * it work */
+int nhvm_vcpu_vmexit(struct vcpu *v, struct cpu_user_regs *regs,
+                     uint64_t exitcode);
+/* inject vmexit into l1 guest. l1 guest will see a VMEXIT due to
+ * 'trapnr' exception.
+ */
+int nhvm_vcpu_vmexit_trap(struct vcpu *v,
+                          unsigned int trapnr, int errcode, unsigned long cr2);
+
+/* returns l2 guest cr3 in l2 guest physical address space. */
+uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
+/* returns l1 guest's cr3 that points to the page table used to
+ * translate l2 guest physical address to l1 guest physical address.
+ */
+uint64_t nhvm_vcpu_hostcr3(struct vcpu *v);
+/* returns the asid number l1 guest wants to use to run the l2 guest */
+uint32_t nhvm_vcpu_asid(struct vcpu *v);
+
+/* returns true, when l1 guest intercepts the specified trap */
+int nhvm_vmcx_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr);
+
+/* returns true when l1 guest wants to use hap to run l2 guest */
+bool_t nhvm_vmcx_hap_enabled(struct vcpu *v);
+/* interrupt */
+enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v);
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
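
[Editor's note] For context, here is a minimal sketch of how a backend would plug into these hooks. The nhvm_* wrappers added to hvm.c dispatch through hvm_funcs and fall back to -EOPNOTSUPP when a hook is left NULL, so SVM (or VMX) opts in by filling the new fields of its hvm_function_table. The nsvm_* helpers below are hypothetical placeholder names, not part of this changeset; note from the patch that nhvm_vcpu_vmexit_trap and nhvm_interrupt_blocked call their hooks unconditionally, so a backend that advertises nested HVM must provide those two.

/* Sketch only: hypothetical SVM-side wiring of the new nested-HVM hooks.
 * The nsvm_* functions are illustrative stubs, not code from this patch. */

#include <asm/hvm/hvm.h>        /* struct hvm_function_table, hvm_funcs */

static int nsvm_vcpu_initialise(struct vcpu *v)
{
    /* Allocate per-vcpu nested-virt state (e.g. a copy of the L1 VMCB). */
    return 0;
}

static uint64_t nsvm_vcpu_hostcr3(struct vcpu *v)
{
    /* Return the nested paging base the L1 guest set up for running L2. */
    return 0;
}

static enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
{
    /* Report whether the L1 guest currently blocks interrupt delivery. */
    return hvm_intblk_none;
}

static struct hvm_function_table svm_function_table = {
    .name                 = "SVM",
    /* ... existing SVM hooks ... */
    .nhvm_vcpu_initialise = nsvm_vcpu_initialise,
    .nhvm_vcpu_hostcr3    = nsvm_vcpu_hostcr3,
    .nhvm_intr_blocked    = nsvm_intr_blocked,
    /* Hooks left NULL make the corresponding nhvm_* wrapper in hvm.c
     * return -EOPNOTSUPP. */
};

The -EOPNOTSUPP fallback lets common code probe for nested-virtualization support through a single interface, without caring whether the underlying implementation is SVM or VMX.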