[Xen-devel] [PATCH] vmx_fpu.patch
Implement an eager save/lazy restore algorithm for dealing with the FP
state of a VMX guest.

Signed-off-by: Xin B Li <xin.b.li@xxxxxxxxx>
Signed-off-by: Asit Mallick <asit.k.mallick@xxxxxxxxx>
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>

--- 1.171/xen/arch/x86/traps.c  2005-05-10 03:53:31 -07:00
+++ edited/xen/arch/x86/traps.c 2005-05-11 11:09:00 -07:00
@@ -917,15 +917,7 @@
 asmlinkage int math_state_restore(struct cpu_user_regs *regs)
 {
     /* Prevent recursion. */
-    clts();
-
-    if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
-    {
-        if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
-            restore_fpu(current);
-        else
-            init_fpu();
-    }
+    do_clts();
 
     if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
     {
===== xen/arch/x86/vmx.c 1.48 vs edited =====
--- 1.48/xen/arch/x86/vmx.c  2005-05-09 07:33:15 -07:00
+++ edited/xen/arch/x86/vmx.c 2005-05-11 11:09:28 -07:00
@@ -154,6 +154,20 @@
     return result;
 }
 
+static void vmx_do_no_device_fault()
+{
+    unsigned long cr0;
+
+    do_clts();
+    __vmread(CR0_READ_SHADOW, &cr0);
+    if (!(cr0 & X86_CR0_TS)) {
+        __vmread(GUEST_CR0, &cr0);
+        cr0 &= ~X86_CR0_TS;
+        __vmwrite(GUEST_CR0, cr0);
+    }
+    __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
+}
+
 static void vmx_do_general_protection_fault(struct cpu_user_regs *regs)
 {
     unsigned long eip, error_code;
@@ -894,6 +908,8 @@
         mov_from_cr(cr, gp, regs);
         break;
     case TYPE_CLTS:
+        do_clts();
+
         __vmread(GUEST_CR0, &value);
         value &= ~X86_CR0_TS; /* clear TS */
         __vmwrite(GUEST_CR0, value);
@@ -1093,6 +1109,11 @@
         break;
     }
 #endif
+    case TRAP_no_device:
+    {
+        vmx_do_no_device_fault();
+        break;
+    }
     case TRAP_gp_fault:
     {
         vmx_do_general_protection_fault(&regs);
===== xen/arch/x86/vmx_io.c 1.22 vs edited =====
--- 1.22/xen/arch/x86/vmx_io.c  2005-04-28 07:04:10 -07:00
+++ edited/xen/arch/x86/vmx_io.c 2005-05-11 11:02:52 -07:00
@@ -429,6 +429,7 @@
 
 void vmx_do_resume(struct exec_domain *d)
 {
+    vmx_stts();
     if ( test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state) )
         __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
     else
===== xen/arch/x86/vmx_vmcs.c 1.21 vs edited =====
--- 1.21/xen/arch/x86/vmx_vmcs.c  2005-04-28 07:04:10 -07:00
+++ edited/xen/arch/x86/vmx_vmcs.c 2005-05-11 11:05:12 -07:00
@@ -164,6 +164,9 @@
     struct pfn_info *page;
     struct cpu_user_regs *regs = get_cpu_user_regs();
 
+    vmx_stts();
+    set_bit(EDF_GUEST_STTS, &ed->flags);
+
     cpu = smp_processor_id();
     page = (struct pfn_info *) alloc_domheap_page(NULL);
===== xen/include/asm-x86/i387.h 1.9 vs edited =====
--- 1.9/xen/include/asm-x86/i387.h  2005-05-08 05:06:08 -07:00
+++ edited/xen/include/asm-x86/i387.h 2005-05-11 11:16:26 -07:00
@@ -28,4 +28,17 @@
         __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) ); \
 } while ( 0 )
 
+/* Make current domain the FPU owner */
+static inline void do_clts()
+{
+    clts();
+    if ( !test_and_set_bit(EDF_USEDFPU, &current->flags) )
+    {
+        if ( test_bit(EDF_DONEFPUINIT, &current->flags) )
+            restore_fpu(current);
+        else
+            init_fpu();
+    }
+}
+
 #endif /* __ASM_I386_I387_H */
===== xen/include/asm-x86/vmx.h 1.6 vs edited =====
--- 1.6/xen/include/asm-x86/vmx.h  2005-04-28 07:04:11 -07:00
+++ edited/xen/include/asm-x86/vmx.h 2005-05-11 11:13:28 -07:00
@@ -24,6 +24,7 @@
 #include <asm/regs.h>
 #include <asm/processor.h>
 #include <asm/vmx_vmcs.h>
+#include <asm/i387.h>
 
 extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
 extern void vmx_asm_do_resume(void);
@@ -251,4 +252,19 @@
     return 0;
 }
 
+/* Make sure that xen intercepts any FP accesses from current */
+static inline void vmx_stts()
+{
+    unsigned long cr0;
+
+    __vmread(GUEST_CR0, &cr0);
+    if (!(cr0 & X86_CR0_TS))
+        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
+
+    __vmread(CR0_READ_SHADOW, &cr0);
+    if (!(cr0 & X86_CR0_TS))
+        __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP |
+                  EXCEPTION_BITMAP_NM);
+}
+
 #endif /* __ASM_X86_VMX_H__ */
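
For reference, the flow the patch sets up: vmx_do_resume() calls
vmx_stts(), which sets TS in GUEST_CR0 and turns on the #NM intercept
whenever the guest's own view of CR0 (the read shadow) has TS clear.
The guest's first FP instruction after a resume then causes an #NM
vmexit; vmx_do_no_device_fault() runs do_clts() to make the current
domain the FPU owner, clears TS in GUEST_CR0 (unless the guest itself
had set TS), and drops the #NM intercept so further FP instructions
run at full speed. The eager-save half lives on the existing
context-switch path, which this patch does not touch. Below is a
minimal stand-alone C sketch of that eager save/lazy restore
protocol; it is illustrative only -- every name in it (fpu_state,
save_fpu(), and so on) is a stand-in, not Xen code.

/*
 * Stand-alone sketch of eager save / lazy restore.  On resume, a
 * TS-like flag is set so the vcpu's first FP access "faults"; the
 * fault handler restores the saved state exactly once and stops
 * trapping.  All names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char area[512]; } fpu_state; /* FXSAVE-sized buffer */

struct vcpu {
    fpu_state saved;    /* eagerly saved FP state */
    bool      used_fpu; /* EDF_USEDFPU analogue: state live in "hardware" */
};

static fpu_state phys_fpu; /* stands in for the physical FP registers */
static bool      ts;       /* stands in for CR0.TS plus the #NM intercept */

/* vmx_stts() analogue: arrange to trap the next FP access. */
static void resume(struct vcpu *v)
{
    (void)v;
    ts = true;
}

/* Context-switch-out path: save FP state eagerly, while it is live. */
static void save_fpu(struct vcpu *v)
{
    if (v->used_fpu) {
        memcpy(&v->saved, &phys_fpu, sizeof phys_fpu);
        v->used_fpu = false;
    }
}

/* vmx_do_no_device_fault() analogue: first FP access after resume. */
static void no_device_fault(struct vcpu *v)
{
    ts = false;                                    /* do_clts()       */
    memcpy(&phys_fpu, &v->saved, sizeof phys_fpu); /* lazy restore    */
    v->used_fpu = true;
}

/* What the hardware does on each guest FP instruction. */
static void guest_fp_insn(struct vcpu *v)
{
    if (ts)
        no_device_fault(v); /* would be an #NM vmexit */
    phys_fpu.area[0]++;     /* the FP instruction itself */
}

int main(void)
{
    struct vcpu a = {0}, b = {0};

    resume(&a);
    guest_fp_insn(&a);  /* traps once, then FP runs untrapped */
    guest_fp_insn(&a);  /* no trap */
    save_fpu(&a);       /* eager save of a's live state */

    resume(&b);         /* b never touches the FPU ... */
    save_fpu(&b);       /* ... so it pays no save/restore cost */

    resume(&a);
    guest_fp_insn(&a);  /* traps once more, restoring a's saved state */
    printf("a.saved[0]=%u phys[0]=%u\n",
           (unsigned)a.saved.area[0], (unsigned)phys_fpu.area[0]);
    return 0;
}

The payoff: a guest that never touches the FPU between reschedules
pays nothing -- no restore and no extra #NM exits -- while a guest
that does use it takes exactly one trap per resume.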