[Xen-changelog] Abstract some details of context switching into ctxt_switch_from/to
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 760f9149dbaa8e07445f1e1227380a92b4205a3a
# Parent  768936b2800aebca0513dbc626890378946f256f
Abstract some details of context switching into ctxt_switch_from/to
function hooks. Allows neater separation between paravirtual and HVM
code paths.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

diff -r 768936b2800a -r 760f9149dbaa xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Sun Mar 19 14:17:50 2006
+++ b/xen/arch/x86/domain.c     Sun Mar 19 16:10:20 2006
@@ -51,6 +51,9 @@
 } __cacheline_aligned;
 static struct percpu_ctxt percpu_ctxt[NR_CPUS];
 
+static void paravirt_ctxt_switch_from(struct vcpu *v);
+static void paravirt_ctxt_switch_to(struct vcpu *v);
+
 static void continue_idle_domain(struct vcpu *v)
 {
     reset_stack_and_jump(idle_loop);
@@ -225,6 +228,9 @@
     {
         v->arch.schedule_tail = continue_nonidle_domain;
     }
+
+    v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
+    v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
 
     v->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
@@ -685,21 +691,32 @@
     percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
 }
 
-#define switch_kernel_stack(_n,_c) ((void)0)
+#define switch_kernel_stack(v) ((void)0)
 
 #elif defined(__i386__)
 
 #define load_segments(n) ((void)0)
 #define save_segments(p) ((void)0)
 
-static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
-{
-    struct tss_struct *tss = &init_tss[cpu];
-    tss->esp1 = n->arch.guest_context.kernel_sp;
-    tss->ss1 = n->arch.guest_context.kernel_ss;
-}
-
-#endif
+static inline void switch_kernel_stack(struct vcpu *v)
+{
+    struct tss_struct *tss = &init_tss[smp_processor_id()];
+    tss->esp1 = v->arch.guest_context.kernel_sp;
+    tss->ss1 = v->arch.guest_context.kernel_ss;
+}
+
+#endif /* __i386__ */
+
+static void paravirt_ctxt_switch_from(struct vcpu *v)
+{
+    save_segments(v);
+}
+
+static void paravirt_ctxt_switch_to(struct vcpu *v)
+{
+    set_int80_direct_trap(v);
+    switch_kernel_stack(v);
+}
 
 #define loaddebug(_v,_reg) \
     __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
@@ -720,15 +737,7 @@
                stack_regs,
                CTXT_SWITCH_STACK_BYTES);
         unlazy_fpu(p);
-        if ( !hvm_guest(p) )
-        {
-            save_segments(p);
-        }
-        else
-        {
-            hvm_save_segments(p);
-            hvm_load_msrs();
-        }
+        p->arch.ctxt_switch_from(p);
     }
 
     if ( !is_idle_vcpu(n) )
@@ -749,15 +758,7 @@
             loaddebug(&n->arch.guest_context, 7);
         }
 
-        if ( !hvm_guest(n) )
-        {
-            set_int80_direct_trap(n);
-            switch_kernel_stack(n, cpu);
-        }
-        else
-        {
-            hvm_restore_msrs(n);
-        }
+        n->arch.ctxt_switch_to(n);
     }
 
     if ( p->domain != n->domain )
diff -r 768936b2800a -r 760f9149dbaa xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Sun Mar 19 14:17:50 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Sun Mar 19 16:10:20 2006
@@ -200,7 +200,8 @@
     return 1;
 }
 
-void svm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_store_cpu_guest_regs(
+    struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -227,23 +228,11 @@
 #endif
 }
 
-void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
+static void svm_load_cpu_guest_regs(
+    struct vcpu *v, struct cpu_user_regs *regs)
 {
     svm_load_cpu_user_regs(v, regs);
 }
-
-#ifdef __x86_64__
-
-void svm_save_segments(struct vcpu *v)
-{
-}
-void svm_load_msrs(void)
-{
-}
-void svm_restore_msrs(struct vcpu *v)
-{
-}
-#endif
 
 #define IS_CANO_ADDRESS(add) 1
 
@@ -458,12 +447,6 @@
 
     hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
-
-#ifdef __x86_64__
-    hvm_funcs.save_segments = svm_save_segments;
-    hvm_funcs.load_msrs = svm_load_msrs;
-    hvm_funcs.restore_msrs = svm_restore_msrs;
-#endif
 
     hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
     hvm_funcs.modify_guest_state = svm_modify_guest_state;
@@ -687,9 +670,19 @@
     reset_stack_and_jump(svm_asm_do_launch);
 }
 
+static void svm_ctxt_switch_from(struct vcpu *v)
+{
+}
+
+static void svm_ctxt_switch_to(struct vcpu *v)
+{
+}
+
 void svm_final_setup_guest(struct vcpu *v)
 {
-    v->arch.schedule_tail = arch_svm_do_launch;
+    v->arch.schedule_tail    = arch_svm_do_launch;
+    v->arch.ctxt_switch_from = svm_ctxt_switch_from;
+    v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
 
     if (v == v->domain->vcpu[0])
     {
diff -r 768936b2800a -r 760f9149dbaa xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sun Mar 19 14:17:50 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Sun Mar 19 16:10:20 2006
@@ -50,9 +50,14 @@
 static unsigned long trace_values[NR_CPUS][4];
 #define TRACE_VMEXIT(index,value) trace_values[smp_processor_id()][index]=value
 
+static void vmx_ctxt_switch_from(struct vcpu *v);
+static void vmx_ctxt_switch_to(struct vcpu *v);
+
 void vmx_final_setup_guest(struct vcpu *v)
 {
-    v->arch.schedule_tail = arch_vmx_do_launch;
+    v->arch.schedule_tail    = arch_vmx_do_launch;
+    v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
+    v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
 
     if ( v->vcpu_id == 0 )
     {
@@ -105,6 +110,7 @@
 }
 
 #ifdef __x86_64__
+
 static struct vmx_msr_state percpu_msr[NR_CPUS];
 
 static u32 msr_data_index[VMX_MSR_COUNT] =
@@ -113,7 +119,7 @@
     MSR_SYSCALL_MASK, MSR_EFER,
 };
 
-void vmx_save_segments(struct vcpu *v)
+static void vmx_save_segments(struct vcpu *v)
 {
     rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
 }
@@ -124,7 +130,7 @@
  * are not modified once set for generic domains, we don't save them,
 * but simply reset them to the values set at percpu_traps_init().
  */
-void vmx_load_msrs(void)
+static void vmx_load_msrs(void)
 {
     struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
     int i;
@@ -302,8 +308,7 @@
     return 1;
 }
 
-void
-vmx_restore_msrs(struct vcpu *v)
+static void vmx_restore_msrs(struct vcpu *v)
 {
     int i = 0;
     struct vmx_msr_state *guest_state;
@@ -323,22 +328,42 @@
 
         HVM_DBG_LOG(DBG_LEVEL_2,
                     "restore guest's index %d msr %lx with %lx\n",
-                    i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
+                    i, (unsigned long)msr_data_index[i],
+                    (unsigned long)guest_state->msr_items[i]);
         set_bit(i, &host_state->flags);
         wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
         clear_bit(i, &guest_flags);
     }
 }
 #else /* __i386__ */
-#define vmx_save_init_msrs() ((void)0)
-
-static inline int long_mode_do_msr_read(struct cpu_user_regs *regs){
+
+#define vmx_save_segments(v) ((void)0)
+#define vmx_load_msrs() ((void)0)
+#define vmx_restore_msrs(v) ((void)0)
+#define vmx_save_init_msrs() ((void)0)
+
+static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
+{
     return 0;
 }
-static inline int long_mode_do_msr_write(struct cpu_user_regs *regs){
+
+static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
+{
     return 0;
 }
-#endif
+
+#endif /* __i386__ */
+
+static void vmx_ctxt_switch_from(struct vcpu *v)
+{
+    vmx_save_segments(v);
+    vmx_load_msrs();
+}
+
+static void vmx_ctxt_switch_to(struct vcpu *v)
+{
+    vmx_restore_msrs(v);
+}
 
 void stop_vmx(void)
 {
@@ -579,12 +604,6 @@
 
     hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
-
-#ifdef __x86_64__
-    hvm_funcs.save_segments = vmx_save_segments;
-    hvm_funcs.load_msrs = vmx_load_msrs;
-    hvm_funcs.restore_msrs = vmx_restore_msrs;
-#endif
 
     hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
     hvm_funcs.modify_guest_state = vmx_modify_guest_state;
diff -r 768936b2800a -r 760f9149dbaa xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Sun Mar 19 14:17:50 2006
+++ b/xen/include/asm-x86/domain.h      Sun Mar 19 16:10:20 2006
@@ -124,6 +124,9 @@
 
     void (*schedule_tail) (struct vcpu *);
 
+    void (*ctxt_switch_from) (struct vcpu *);
+    void (*ctxt_switch_to) (struct vcpu *);
+
     /* Bounce information for propagating an exception to guest OS. */
     struct trap_bounce trap_bounce;
 
diff -r 768936b2800a -r 760f9149dbaa xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Sun Mar 19 14:17:50 2006
+++ b/xen/include/asm-x86/hvm/hvm.h     Sun Mar 19 16:10:20 2006
@@ -41,18 +41,11 @@
     /*
      * Store and load guest state:
      * 1) load/store guest register state,
-     * 2) load/store segment state (x86_64 only),
-     * 3) load/store msr register state (x86_64 only),
-     * 4) store guest control register state (used for panic dumps),
-     * 5) modify guest state (e.g., set debug flags).
+     * 2) store guest control register state (used for panic dumps),
+     * 3) modify guest state (e.g., set debug flags).
      */
     void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
     void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
-#ifdef __x86_64__
-    void (*save_segments)(struct vcpu *v);
-    void (*load_msrs)(void);
-    void (*restore_msrs)(struct vcpu *v);
-#endif
     void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
     void (*modify_guest_state)(struct vcpu *v);
 
@@ -111,33 +104,6 @@
     hvm_funcs.load_cpu_guest_regs(v, r);
 }
 
-#ifdef __x86_64__
-static inline void
-hvm_save_segments(struct vcpu *v)
-{
-    if (hvm_funcs.save_segments)
-        hvm_funcs.save_segments(v);
-}
-
-static inline void
-hvm_load_msrs(void)
-{
-    if (hvm_funcs.load_msrs)
-        hvm_funcs.load_msrs();
-}
-
-static inline void
-hvm_restore_msrs(struct vcpu *v)
-{
-    if (hvm_funcs.restore_msrs)
-        hvm_funcs.restore_msrs(v);
-}
-#else
-#define hvm_save_segments(v) ((void)0)
-#define hvm_load_msrs(v) ((void)0)
-#define hvm_restore_msrs(v) ((void)0)
-#endif /* __x86_64__ */
-
 static inline void
 hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
 {
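The shape of the change is easy to see in miniature. The following self-contained C sketch is illustrative only, not code from the tree: the cut-down struct vcpu, the context_switch() driver and the printf bodies are stand-ins invented for the example. It shows the dispatch pattern the hooks enable, with the switch path calling per-vcpu function pointers instead of branching on hvm_guest():

    /* toy_ctxt_switch.c -- illustrative sketch, not part of the changeset */
    #include <stdio.h>

    struct vcpu {
        const char *name;
        /* per-vcpu hooks, analogous to the ones this patch adds to arch_vcpu */
        void (*ctxt_switch_from)(struct vcpu *v);
        void (*ctxt_switch_to)(struct vcpu *v);
    };

    /* paravirtual path: stands in for paravirt_ctxt_switch_from/to() */
    static void pv_from(struct vcpu *v)  { printf("%s: save segments\n", v->name); }
    static void pv_to(struct vcpu *v)    { printf("%s: int80 trap + kernel stack\n", v->name); }

    /* HVM path: stands in for vmx_ctxt_switch_from/to() (SVM's are empty stubs) */
    static void hvm_from(struct vcpu *v) { printf("%s: save shadow GS, reload host MSRs\n", v->name); }
    static void hvm_to(struct vcpu *v)   { printf("%s: restore guest MSRs\n", v->name); }

    /* the switch path no longer tests the guest type -- it just calls the hooks */
    static void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        prev->ctxt_switch_from(prev);
        next->ctxt_switch_to(next);
    }

    int main(void)
    {
        struct vcpu pv  = { "pv-vcpu",  pv_from,  pv_to  };
        struct vcpu hvm = { "hvm-vcpu", hvm_from, hvm_to };

        context_switch(&pv, &hvm);  /* paravirt 'from' hook, then HVM 'to' hook */
        context_switch(&hvm, &pv);  /* and back again */
        return 0;
    }

Binding the choice of hooks once, at vcpu setup time, keeps the context-switch fast path free of guest-type branches; a new guest flavour only has to register its own pair of functions.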
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog