[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 5/12] Add HVM support
Add HVM hardware feature suspend/resume. Signed-off-by Ke Yu <ke.yu@xxxxxxxxx> Signed-off-by Kevin Tian <kevin.tian@xxxxxxxxx> diff -r 42c975a55f32 xen/arch/x86/acpi/power.c --- a/xen/arch/x86/acpi/power.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/acpi/power.c Tue Apr 10 11:31:22 2007 -0400 @@ -159,8 +159,10 @@ int enter_state(u32 state) return -EBUSY; for_each_domain(d) - if (d->domain_id != 0) + if (d->domain_id != 0) { domain_pause(d); + arch_domain_suspend(d); + } pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n", acpi_states[state]); diff -r 42c975a55f32 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/domain.c Tue Apr 10 12:39:07 2007 -0400 @@ -1556,6 +1556,16 @@ void arch_dump_vcpu_info(struct vcpu *v) paging_dump_vcpu_info(v); } +/* suspend hook called by Xen S3 code + assumption: domain has already been paused*/ +int arch_domain_suspend(struct domain *d) +{ + if (is_hvm_domain(d)){ + hvm_suspend(d); + } + return 0; +} + /* * Local variables: * mode: C diff -r 42c975a55f32 xen/arch/x86/hvm/vmx/vmcs.c --- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Apr 10 12:44:02 2007 -0400 @@ -170,7 +170,7 @@ static void __vmx_clear_vmcs(void *info) v->arch.hvm_vmx.launched = 0; } -static void vmx_clear_vmcs(struct vcpu *v) +void vmx_clear_vmcs(struct vcpu *v) { int cpu = v->arch.hvm_vmx.active_cpu; diff -r 42c975a55f32 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Apr 10 12:42:30 2007 -0400 @@ -51,6 +51,11 @@ #include <public/hvm/save.h> #include <asm/hvm/trace.h> +static DEFINE_PER_CPU(struct vmcs_struct*, host_vmcs); + +static void vmx_suspend(struct domain *d); +static int vmx_resume(void); + static void vmx_ctxt_switch_from(struct vcpu *v); static void vmx_ctxt_switch_to(struct vcpu *v); @@ -1024,7 +1029,9 @@ static struct hvm_function_table vmx_fun 
.inject_exception = vmx_inject_exception, .init_ap_context = vmx_init_ap_context, .init_hypercall_page = vmx_init_hypercall_page, - .event_injection_faulted = vmx_event_injection_faulted + .event_injection_faulted = vmx_event_injection_faulted, + .suspend = vmx_suspend, + .resume = vmx_resume }; int start_vmx(void) @@ -1080,6 +1087,8 @@ int start_vmx(void) vmx_free_host_vmcs(vmcs); return 0; } + + this_cpu(host_vmcs) = vmcs; vmx_save_host_msrs(); @@ -2782,6 +2791,34 @@ asmlinkage void vmx_trace_vmentry(void) HVMTRACE_0D(VMENTRY, v); } +/* Suspend target domain with VMCS sync-ed */ +void vmx_suspend(struct domain* d){ + struct vcpu *v; + + for_each_vcpu(d, v) { + spin_lock(&v->arch.hvm_vmx.vmcs_lock); + vmx_clear_vmcs(v); + spin_unlock(&v->arch.hvm_vmx.vmcs_lock); + } +} + +/* Resume vmx feature on the given cpu, required by Sx resume path */ +static int vmx_resume(void){ + + struct vmcs_struct *vmcs = this_cpu(host_vmcs); + + if ( __vmxon(virt_to_maddr(vmcs)) ) + { + clear_in_cr4(X86_CR4_VMXE); + printk("VMXON failed\n"); + vmx_free_host_vmcs(vmcs); + return 0; + } + + printk("VMXON is done\n"); + return 1; +} + /* * Local variables: * mode: C diff -r 42c975a55f32 xen/arch/x86/x86_32/power/cpu.c --- a/xen/arch/x86/x86_32/power/cpu.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/x86_32/power/cpu.c Tue Apr 10 12:36:32 2007 -0400 @@ -19,6 +19,8 @@ #include <asm/processor.h> #include <asm/msr.h> #include <asm/flushtlb.h> +#include <asm/hvm/hvm.h> +#include <asm/hvm/support.h> /* image of the saved processor state */ struct saved_context { @@ -234,6 +236,12 @@ void __restore_processor_state(struct sa do_fpu_end(); mtrr_ap_init(); mcheck_init(&boot_cpu_data); + +#ifdef __XEN__ + if (hvm_enabled){ + hvm_resume(); + } +#endif } void restore_processor_state(void) diff -r 42c975a55f32 xen/arch/x86/x86_64/power/suspend.c --- a/xen/arch/x86/x86_64/power/suspend.c Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/arch/x86/x86_64/power/suspend.c Tue Apr 10 12:39:59 2007 -0400 @@ 
-151,6 +151,12 @@ void __restore_processor_state(struct sa do_fpu_end(); mtrr_ap_init(); + +#ifdef __XEN__ + if (hvm_enabled){ + hvm_resume(); + } +#endif /*__XEN__*/ } void restore_processor_state(void) diff -r 42c975a55f32 xen/include/asm-x86/hvm/hvm.h --- a/xen/include/asm-x86/hvm/hvm.h Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/include/asm-x86/hvm/hvm.h Tue Apr 10 12:37:36 2007 -0400 @@ -136,6 +136,10 @@ struct hvm_function_table { void (*init_hypercall_page)(struct domain *d, void *hypercall_page); int (*event_injection_faulted)(struct vcpu *v); + + void (*suspend)(struct domain *d); + + int (*resume)(void); }; extern struct hvm_function_table hvm_funcs; @@ -257,4 +261,20 @@ static inline int hvm_event_injection_fa return hvm_funcs.event_injection_faulted(v); } +static inline void +hvm_suspend(struct domain* d) +{ + if (hvm_funcs.suspend) + hvm_funcs.suspend(d); +} + +static inline int +hvm_resume(void) +{ + if (hvm_funcs.resume){ + return hvm_funcs.resume(); + } + return 1; +} + #endif /* __ASM_X86_HVM_HVM_H__ */ diff -r 42c975a55f32 xen/include/asm-x86/hvm/vmx/vmcs.h --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Tue Apr 10 12:44:28 2007 -0400 @@ -28,6 +28,7 @@ extern void vmcs_dump_vcpu(void); extern void vmcs_dump_vcpu(void); extern void vmx_init_vmcs_config(void); extern void setup_vmcs_dump(void); +extern void vmx_clear_vmcs(struct vcpu *v); struct vmcs_struct { u32 vmcs_revision_id; diff -r 42c975a55f32 xen/include/xen/domain.h --- a/xen/include/xen/domain.h Tue Apr 10 11:29:16 2007 -0400 +++ b/xen/include/xen/domain.h Tue Apr 10 12:22:12 2007 -0400 @@ -55,4 +55,6 @@ void arch_dump_domain_info(struct domain int arch_vcpu_reset(struct vcpu *v); +int arch_domain_suspend(struct domain *d); + #endif /* __XEN_DOMAIN_H__ */ Attachment:
hvm_context.patch _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our |