[Xen-changelog] [xen-unstable] x86 hvm: Factor out save/restore of segment registers from VMX/SVM
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1202911438 0
# Node ID 5e1df44d406ed760032f62cfabab875691d8c9e6
# Parent  0164d924cebacfef62673a49c2f4ad395df5444b
x86 hvm: Factor out save/restore of segment registers from VMX/SVM
files into common HVM code.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c      |  121 ++++++++++++++++++++++++++++++++++++++++++--
 xen/arch/x86/hvm/svm/svm.c  |   98 -----------------------------------
 xen/arch/x86/hvm/vmx/vmcs.c |    4 +
 xen/arch/x86/hvm/vmx/vmx.c  |  107 +-------------------------------------
 4 files changed, 129 insertions(+), 201 deletions(-)

diff -r 0164d924ceba -r 5e1df44d406e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Wed Feb 13 14:03:58 2008 +0000
@@ -287,9 +287,10 @@ static int hvm_save_cpu_ctxt(struct doma
 {
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct segment_register seg;
     struct vcpu_guest_context *vc;
 
-    for_each_vcpu(d, v)
+    for_each_vcpu ( d, v )
     {
         /* We don't need to save state for a vcpu that is down; the restore
          * code will leave it down if there is nothing saved. */
@@ -299,12 +300,69 @@ static int hvm_save_cpu_ctxt(struct doma
         /* Architecture-specific vmcs/vmcb bits */
         hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
-        /* Other vcpu register state */
+        hvm_get_segment_register(v, x86_seg_idtr, &seg);
+        ctxt.idtr_limit = seg.limit;
+        ctxt.idtr_base = seg.base;
+
+        hvm_get_segment_register(v, x86_seg_gdtr, &seg);
+        ctxt.gdtr_limit = seg.limit;
+        ctxt.gdtr_base = seg.base;
+
+        hvm_get_segment_register(v, x86_seg_cs, &seg);
+        ctxt.cs_sel = seg.sel;
+        ctxt.cs_limit = seg.limit;
+        ctxt.cs_base = seg.base;
+        ctxt.cs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ds, &seg);
+        ctxt.ds_sel = seg.sel;
+        ctxt.ds_limit = seg.limit;
+        ctxt.ds_base = seg.base;
+        ctxt.ds_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_es, &seg);
+        ctxt.es_sel = seg.sel;
+        ctxt.es_limit = seg.limit;
+        ctxt.es_base = seg.base;
+        ctxt.es_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ss, &seg);
+        ctxt.ss_sel = seg.sel;
+        ctxt.ss_limit = seg.limit;
+        ctxt.ss_base = seg.base;
+        ctxt.ss_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_fs, &seg);
+        ctxt.fs_sel = seg.sel;
+        ctxt.fs_limit = seg.limit;
+        ctxt.fs_base = seg.base;
+        ctxt.fs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_gs, &seg);
+        ctxt.gs_sel = seg.sel;
+        ctxt.gs_limit = seg.limit;
+        ctxt.gs_base = seg.base;
+        ctxt.gs_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_tr, &seg);
+        ctxt.tr_sel = seg.sel;
+        ctxt.tr_limit = seg.limit;
+        ctxt.tr_base = seg.base;
+        ctxt.tr_arbytes = seg.attr.bytes;
+
+        hvm_get_segment_register(v, x86_seg_ldtr, &seg);
+        ctxt.ldtr_sel = seg.sel;
+        ctxt.ldtr_limit = seg.limit;
+        ctxt.ldtr_base = seg.base;
+        ctxt.ldtr_arbytes = seg.attr.bytes;
+
         vc = &v->arch.guest_context;
+
         if ( v->fpu_initialised )
             memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
         else
             memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+
         ctxt.rax = vc->user_regs.eax;
         ctxt.rbx = vc->user_regs.ebx;
         ctxt.rcx = vc->user_regs.ecx;
@@ -343,6 +401,7 @@ static int hvm_load_cpu_ctxt(struct doma
     int vcpuid, rc;
     struct vcpu *v;
     struct hvm_hw_cpu ctxt;
+    struct segment_register seg;
    struct vcpu_guest_context *vc;
 
     /* Which vcpu is this? */
@@ -398,8 +457,64 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
 
-    /* Other vcpu register state */
+    seg.limit = ctxt.idtr_limit;
+    seg.base = ctxt.idtr_base;
+    hvm_set_segment_register(v, x86_seg_idtr, &seg);
+
+    seg.limit = ctxt.gdtr_limit;
+    seg.base = ctxt.gdtr_base;
+    hvm_set_segment_register(v, x86_seg_gdtr, &seg);
+
+    seg.sel = ctxt.cs_sel;
+    seg.limit = ctxt.cs_limit;
+    seg.base = ctxt.cs_base;
+    seg.attr.bytes = ctxt.cs_arbytes;
+    hvm_set_segment_register(v, x86_seg_cs, &seg);
+
+    seg.sel = ctxt.ds_sel;
+    seg.limit = ctxt.ds_limit;
+    seg.base = ctxt.ds_base;
+    seg.attr.bytes = ctxt.ds_arbytes;
+    hvm_set_segment_register(v, x86_seg_ds, &seg);
+
+    seg.sel = ctxt.es_sel;
+    seg.limit = ctxt.es_limit;
+    seg.base = ctxt.es_base;
+    seg.attr.bytes = ctxt.es_arbytes;
+    hvm_set_segment_register(v, x86_seg_es, &seg);
+
+    seg.sel = ctxt.ss_sel;
+    seg.limit = ctxt.ss_limit;
+    seg.base = ctxt.ss_base;
+    seg.attr.bytes = ctxt.ss_arbytes;
+    hvm_set_segment_register(v, x86_seg_ss, &seg);
+
+    seg.sel = ctxt.fs_sel;
+    seg.limit = ctxt.fs_limit;
+    seg.base = ctxt.fs_base;
+    seg.attr.bytes = ctxt.fs_arbytes;
+    hvm_set_segment_register(v, x86_seg_fs, &seg);
+
+    seg.sel = ctxt.gs_sel;
+    seg.limit = ctxt.gs_limit;
+    seg.base = ctxt.gs_base;
+    seg.attr.bytes = ctxt.gs_arbytes;
+    hvm_set_segment_register(v, x86_seg_gs, &seg);
+
+    seg.sel = ctxt.tr_sel;
+    seg.limit = ctxt.tr_limit;
+    seg.base = ctxt.tr_base;
+    seg.attr.bytes = ctxt.tr_arbytes;
+    hvm_set_segment_register(v, x86_seg_tr, &seg);
+
+    seg.sel = ctxt.ldtr_sel;
+    seg.limit = ctxt.ldtr_limit;
+    seg.base = ctxt.ldtr_base;
+    seg.attr.bytes = ctxt.ldtr_arbytes;
+    hvm_set_segment_register(v, x86_seg_ldtr, &seg);
+
     memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+
     vc->user_regs.eax = ctxt.rax;
     vc->user_regs.ebx = ctxt.rbx;
     vc->user_regs.ecx = ctxt.rcx;
diff -r 0164d924ceba -r 5e1df44d406e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Wed Feb 13 14:03:58 2008 +0000
@@ -181,7 +181,7 @@ static void svm_restore_dr(struct vcpu *
         __restore_debug_registers(v);
 }
 
-int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
+static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -189,52 +189,6 @@ int svm_vmcb_save(struct vcpu *v, struct
     c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
     c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
     c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
-
-    c->idtr_limit = vmcb->idtr.limit;
-    c->idtr_base = vmcb->idtr.base;
-
-    c->gdtr_limit = vmcb->gdtr.limit;
-    c->gdtr_base = vmcb->gdtr.base;
-
-    c->cs_sel = vmcb->cs.sel;
-    c->cs_limit = vmcb->cs.limit;
-    c->cs_base = vmcb->cs.base;
-    c->cs_arbytes = vmcb->cs.attr.bytes;
-
-    c->ds_sel = vmcb->ds.sel;
-    c->ds_limit = vmcb->ds.limit;
-    c->ds_base = vmcb->ds.base;
-    c->ds_arbytes = vmcb->ds.attr.bytes;
-
-    c->es_sel = vmcb->es.sel;
-    c->es_limit = vmcb->es.limit;
-    c->es_base = vmcb->es.base;
-    c->es_arbytes = vmcb->es.attr.bytes;
-
-    c->ss_sel = vmcb->ss.sel;
-    c->ss_limit = vmcb->ss.limit;
-    c->ss_base = vmcb->ss.base;
-    c->ss_arbytes = vmcb->ss.attr.bytes;
-
-    c->fs_sel = vmcb->fs.sel;
-    c->fs_limit = vmcb->fs.limit;
-    c->fs_base = vmcb->fs.base;
-    c->fs_arbytes = vmcb->fs.attr.bytes;
-
-    c->gs_sel = vmcb->gs.sel;
-    c->gs_limit = vmcb->gs.limit;
-    c->gs_base = vmcb->gs.base;
-    c->gs_arbytes = vmcb->gs.attr.bytes;
-
-    c->tr_sel = vmcb->tr.sel;
-    c->tr_limit = vmcb->tr.limit;
-    c->tr_base = vmcb->tr.base;
-    c->tr_arbytes = vmcb->tr.attr.bytes;
-
-    c->ldtr_sel = vmcb->ldtr.sel;
-    c->ldtr_limit = vmcb->ldtr.limit;
-    c->ldtr_base = vmcb->ldtr.base;
-    c->ldtr_arbytes = vmcb->ldtr.attr.bytes;
 
     c->sysenter_cs = vmcb->sysenter_cs;
     c->sysenter_esp = vmcb->sysenter_esp;
@@ -253,8 +207,7 @@ int svm_vmcb_save(struct vcpu *v, struct
     return 1;
 }
 
-
-int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
+static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     unsigned long mfn = 0;
     p2m_type_t p2mt;
@@ -300,53 +253,6 @@ int svm_vmcb_restore(struct vcpu *v, str
            printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
                   __func__, c->cr3, c->cr0, c->cr4);
 #endif
-
-    vmcb->idtr.limit = c->idtr_limit;
-    vmcb->idtr.base = c->idtr_base;
-
-    vmcb->gdtr.limit = c->gdtr_limit;
-    vmcb->gdtr.base = c->gdtr_base;
-
-    vmcb->cs.sel = c->cs_sel;
-    vmcb->cs.limit = c->cs_limit;
-    vmcb->cs.base = c->cs_base;
-    vmcb->cs.attr.bytes = c->cs_arbytes;
-
-    vmcb->ds.sel = c->ds_sel;
-    vmcb->ds.limit = c->ds_limit;
-    vmcb->ds.base = c->ds_base;
-    vmcb->ds.attr.bytes = c->ds_arbytes;
-
-    vmcb->es.sel = c->es_sel;
-    vmcb->es.limit = c->es_limit;
-    vmcb->es.base = c->es_base;
-    vmcb->es.attr.bytes = c->es_arbytes;
-
-    vmcb->ss.sel = c->ss_sel;
-    vmcb->ss.limit = c->ss_limit;
-    vmcb->ss.base = c->ss_base;
-    vmcb->ss.attr.bytes = c->ss_arbytes;
-    vmcb->cpl = vmcb->ss.attr.fields.dpl;
-
-    vmcb->fs.sel = c->fs_sel;
-    vmcb->fs.limit = c->fs_limit;
-    vmcb->fs.base = c->fs_base;
-    vmcb->fs.attr.bytes = c->fs_arbytes;
-
-    vmcb->gs.sel = c->gs_sel;
-    vmcb->gs.limit = c->gs_limit;
-    vmcb->gs.base = c->gs_base;
-    vmcb->gs.attr.bytes = c->gs_arbytes;
-
-    vmcb->tr.sel = c->tr_sel;
-    vmcb->tr.limit = c->tr_limit;
-    vmcb->tr.base = c->tr_base;
-    vmcb->tr.attr.bytes = c->tr_arbytes;
-
-    vmcb->ldtr.sel = c->ldtr_sel;
-    vmcb->ldtr.limit = c->ldtr_limit;
-    vmcb->ldtr.base = c->ldtr_base;
-    vmcb->ldtr.attr.bytes = c->ldtr_arbytes;
 
     vmcb->sysenter_cs = c->sysenter_cs;
     vmcb->sysenter_esp = c->sysenter_esp;
diff -r 0164d924ceba -r 5e1df44d406e xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Feb 13 14:03:58 2008 +0000
@@ -338,6 +338,8 @@ void vmx_vmcs_enter(struct vcpu *v)
     if ( likely(v == current) )
         return;
 
+    BUG_ON(vcpu_runnable(v));
+
     fv = &this_cpu(foreign_vmcs);
 
     if ( fv->v == v )
@@ -367,6 +369,8 @@ void vmx_vmcs_exit(struct vcpu *v)
     if ( likely(v == current) )
         return;
+
+    BUG_ON(vcpu_runnable(v));
 
     fv = &this_cpu(foreign_vmcs);
     BUG_ON(fv->v != v);
diff -r 0164d924ceba -r 5e1df44d406e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 13 10:43:13 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Feb 13 14:03:58 2008 +0000
@@ -450,7 +450,7 @@ static void vmx_restore_dr(struct vcpu *
         __restore_debug_registers(v);
 }
 
-void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
+static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     uint32_t ev;
 
@@ -462,52 +462,6 @@ void vmx_vmcs_save(struct vcpu *v, struc
     c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
 
     c->msr_efer = v->arch.hvm_vcpu.guest_efer;
-
-    c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
-    c->idtr_base = __vmread(GUEST_IDTR_BASE);
-
-    c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
-    c->gdtr_base = __vmread(GUEST_GDTR_BASE);
-
-    c->cs_sel = __vmread(GUEST_CS_SELECTOR);
-    c->cs_limit = __vmread(GUEST_CS_LIMIT);
-    c->cs_base = __vmread(GUEST_CS_BASE);
-    c->cs_arbytes = __vmread(GUEST_CS_AR_BYTES);
-
-    c->ds_sel = __vmread(GUEST_DS_SELECTOR);
-    c->ds_limit = __vmread(GUEST_DS_LIMIT);
-    c->ds_base = __vmread(GUEST_DS_BASE);
-    c->ds_arbytes = __vmread(GUEST_DS_AR_BYTES);
-
-    c->es_sel = __vmread(GUEST_ES_SELECTOR);
-    c->es_limit = __vmread(GUEST_ES_LIMIT);
-    c->es_base = __vmread(GUEST_ES_BASE);
-    c->es_arbytes = __vmread(GUEST_ES_AR_BYTES);
-
-    c->ss_sel = __vmread(GUEST_SS_SELECTOR);
-    c->ss_limit = __vmread(GUEST_SS_LIMIT);
-    c->ss_base = __vmread(GUEST_SS_BASE);
-    c->ss_arbytes = __vmread(GUEST_SS_AR_BYTES);
-
-    c->fs_sel = __vmread(GUEST_FS_SELECTOR);
-    c->fs_limit = __vmread(GUEST_FS_LIMIT);
-    c->fs_base = __vmread(GUEST_FS_BASE);
-    c->fs_arbytes = __vmread(GUEST_FS_AR_BYTES);
-
-    c->gs_sel = __vmread(GUEST_GS_SELECTOR);
-    c->gs_limit = __vmread(GUEST_GS_LIMIT);
-    c->gs_base = __vmread(GUEST_GS_BASE);
-    c->gs_arbytes = __vmread(GUEST_GS_AR_BYTES);
-
-    c->tr_sel = __vmread(GUEST_TR_SELECTOR);
-    c->tr_limit = __vmread(GUEST_TR_LIMIT);
-    c->tr_base = __vmread(GUEST_TR_BASE);
-    c->tr_arbytes = __vmread(GUEST_TR_AR_BYTES);
-
-    c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
-    c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
-    c->ldtr_base = __vmread(GUEST_LDTR_BASE);
-    c->ldtr_arbytes = __vmread(GUEST_LDTR_AR_BYTES);
 
     c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
     c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
@@ -552,7 +506,7 @@ static int vmx_restore_cr0_cr3(
     return 0;
 }
 
-int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
+static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     int rc;
 
@@ -584,52 +538,6 @@ int vmx_vmcs_restore(struct vcpu *v, str
 
     v->arch.hvm_vcpu.guest_efer = c->msr_efer;
     vmx_update_guest_efer(v);
-
-    __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
-    __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
-
-    __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
-    __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
-
-    __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
-    __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
-    __vmwrite(GUEST_CS_BASE, c->cs_base);
-    __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes);
-
-    __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
-    __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
-    __vmwrite(GUEST_DS_BASE, c->ds_base);
-    __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes);
-
-    __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
-    __vmwrite(GUEST_ES_LIMIT, c->es_limit);
-    __vmwrite(GUEST_ES_BASE, c->es_base);
-    __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes);
-
-    __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
-    __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
-    __vmwrite(GUEST_SS_BASE, c->ss_base);
-    __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes);
-
-    __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
-    __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
-    __vmwrite(GUEST_FS_BASE, c->fs_base);
-    __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes);
-
-    __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
-    __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
-    __vmwrite(GUEST_GS_BASE, c->gs_base);
-    __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes);
-
-    __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
-    __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
-    __vmwrite(GUEST_TR_BASE, c->tr_base);
-    __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes);
-
-    __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
-    __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
-    __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
-    __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes);
 
     __vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
     __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
@@ -821,7 +729,7 @@ static void vmx_get_segment_register(str
 {
     uint32_t attr = 0;
 
-    ASSERT(v == current);
+    vmx_vmcs_enter(v);
 
     switch ( seg )
     {
@@ -885,6 +793,8 @@ static void vmx_get_segment_register(str
         BUG();
     }
 
+    vmx_vmcs_exit(v);
+
     reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
     /* Unusable flag is folded into Present flag. */
     if ( attr & (1u<<16) )
@@ -895,8 +805,6 @@ static void vmx_set_segment_register(str
                                       struct segment_register *reg)
 {
     uint32_t attr;
-
-    ASSERT((v == current) || !vcpu_runnable(v));
 
     attr = reg->attr.bytes;
     attr = ((attr & 0xf00) << 4) | (attr & 0xff);
@@ -1029,7 +937,6 @@ static enum hvm_intblk vmx_interrupt_blo
 
 static void vmx_update_host_cr3(struct vcpu *v)
 {
-    ASSERT((v == current) || !vcpu_runnable(v));
     vmx_vmcs_enter(v);
     __vmwrite(HOST_CR3, v->arch.cr3);
     vmx_vmcs_exit(v);
@@ -1037,8 +944,6 @@ static void vmx_update_host_cr3(struct v
 
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
-    ASSERT((v == current) || !vcpu_runnable(v));
-
     vmx_vmcs_enter(v);
 
     switch ( cr )
@@ -1088,8 +993,6 @@ static void vmx_update_guest_efer(struct
 {
 #ifdef __x86_64__
     unsigned long vm_entry_value;
-
-    ASSERT((v == current) || !vcpu_runnable(v));
 
     vmx_vmcs_enter(v);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog