[Xen-changelog] [xen-unstable] hvm: New HVM function hvm_set_segment_register().
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1190032389 -3600
# Node ID 49700bb716bb71b8a3ed23216522a62cf8f95259
# Parent  babe17e7a4eeed5dda1c8e615363d922c93706af
hvm: New HVM function hvm_set_segment_register().
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c        |   72 +++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/vmx/vmx.c        |   74 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h     |    9 ++++
 xen/include/asm-x86/hvm/svm/svm.h |   16 +++++++-
 4 files changed, 156 insertions(+), 15 deletions(-)

diff -r babe17e7a4ee -r 49700bb716bb xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Mon Sep 17 13:33:09 2007 +0100
@@ -618,9 +618,7 @@ static void svm_sync_vmcb(struct vcpu *v
 
     arch_svm->vmcb_in_sync = 1;
 
-    asm volatile (
-        ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(arch_svm->vmcb)) );
+    svm_vmsave(arch_svm->vmcb);
 }
 
 static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
@@ -649,6 +647,7 @@ static void svm_get_segment_register(str
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
     switch ( seg )
     {
     case x86_seg_cs:
@@ -685,7 +684,58 @@ static void svm_get_segment_register(str
         svm_sync_vmcb(v);
         memcpy(reg, &vmcb->ldtr, sizeof(*reg));
         break;
-    default: BUG();
+    default:
+        BUG();
+    }
+}
+
+static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                                     struct segment_register *reg)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    switch ( seg )
+    {
+    case x86_seg_cs:
+        memcpy(&vmcb->cs, reg, sizeof(*reg));
+        break;
+    case x86_seg_ds:
+        memcpy(&vmcb->ds, reg, sizeof(*reg));
+        break;
+    case x86_seg_es:
+        memcpy(&vmcb->es, reg, sizeof(*reg));
+        break;
+    case x86_seg_fs:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->fs, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_gs:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->gs, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_ss:
+        memcpy(&vmcb->ss, reg, sizeof(*reg));
+        break;
+    case x86_seg_tr:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->tr, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    case x86_seg_gdtr:
+        memcpy(&vmcb->gdtr, reg, sizeof(*reg));
+        break;
+    case x86_seg_idtr:
+        memcpy(&vmcb->idtr, reg, sizeof(*reg));
+        break;
+    case x86_seg_ldtr:
+        svm_sync_vmcb(v);
+        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
+        svm_vmload(vmcb);
+        break;
+    default:
+        BUG();
     }
 }
 
@@ -787,10 +837,7 @@ static void svm_ctxt_switch_from(struct 
     svm_save_dr(v);
 
     svm_sync_vmcb(v);
-
-    asm volatile (
-        ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(root_vmcb[cpu])) );
+    svm_vmload(root_vmcb[cpu]);
 
 #ifdef __x86_64__
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -826,12 +873,8 @@ static void svm_ctxt_switch_to(struct vc
 
     svm_restore_dr(v);
 
-    asm volatile (
-        ".byte 0x0f,0x01,0xdb" /* vmsave */
-        : : "a" (__pa(root_vmcb[cpu])) );
-    asm volatile (
-        ".byte 0x0f,0x01,0xda" /* vmload */
-        : : "a" (__pa(v->arch.hvm_svm.vmcb)) );
+    svm_vmsave(root_vmcb[cpu]);
+    svm_vmload(v->arch.hvm_svm.vmcb);
 }
 
 static void svm_do_resume(struct vcpu *v)
@@ -926,6 +969,7 @@ static struct hvm_function_table svm_fun
     .guest_x86_mode       = svm_guest_x86_mode,
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
+    .set_segment_register = svm_set_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
diff -r babe17e7a4ee -r 49700bb716bb xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Sep 17 13:33:09 2007 +0100
@@ -957,6 +957,79 @@ static void vmx_get_segment_register(str
         reg->attr.fields.p = 0;
 }
 
+static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                                     struct segment_register *reg)
+{
+    u16 attr;
+
+    ASSERT(v == current);
+
+    attr = reg->attr.bytes;
+    attr = ((attr & 0xf00) << 4) | (attr & 0xff);
+
+    switch ( seg )
+    {
+    case x86_seg_cs:
+        __vmwrite(GUEST_CS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_CS_LIMIT, reg->limit);
+        __vmwrite(GUEST_CS_BASE, reg->base);
+        __vmwrite(GUEST_CS_AR_BYTES, attr);
+        break;
+    case x86_seg_ds:
+        __vmwrite(GUEST_DS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_DS_LIMIT, reg->limit);
+        __vmwrite(GUEST_DS_BASE, reg->base);
+        __vmwrite(GUEST_DS_AR_BYTES, attr);
+        break;
+    case x86_seg_es:
+        __vmwrite(GUEST_ES_SELECTOR, reg->sel);
+        __vmwrite(GUEST_ES_LIMIT, reg->limit);
+        __vmwrite(GUEST_ES_BASE, reg->base);
+        __vmwrite(GUEST_ES_AR_BYTES, attr);
+        break;
+    case x86_seg_fs:
+        __vmwrite(GUEST_FS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_FS_LIMIT, reg->limit);
+        __vmwrite(GUEST_FS_BASE, reg->base);
+        __vmwrite(GUEST_FS_AR_BYTES, attr);
+        break;
+    case x86_seg_gs:
+        __vmwrite(GUEST_GS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_GS_LIMIT, reg->limit);
+        __vmwrite(GUEST_GS_BASE, reg->base);
+        __vmwrite(GUEST_GS_AR_BYTES, attr);
+        break;
+    case x86_seg_ss:
+        __vmwrite(GUEST_SS_SELECTOR, reg->sel);
+        __vmwrite(GUEST_SS_LIMIT, reg->limit);
+        __vmwrite(GUEST_SS_BASE, reg->base);
+        __vmwrite(GUEST_SS_AR_BYTES, attr);
+        break;
+    case x86_seg_tr:
+        __vmwrite(GUEST_TR_SELECTOR, reg->sel);
+        __vmwrite(GUEST_TR_LIMIT, reg->limit);
+        __vmwrite(GUEST_TR_BASE, reg->base);
+        __vmwrite(GUEST_TR_AR_BYTES, attr);
+        break;
+    case x86_seg_gdtr:
+        __vmwrite(GUEST_GDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_GDTR_BASE, reg->base);
+        break;
+    case x86_seg_idtr:
+        __vmwrite(GUEST_IDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_IDTR_BASE, reg->base);
+        break;
+    case x86_seg_ldtr:
+        __vmwrite(GUEST_LDTR_SELECTOR, reg->sel);
+        __vmwrite(GUEST_LDTR_LIMIT, reg->limit);
+        __vmwrite(GUEST_LDTR_BASE, reg->base);
+        __vmwrite(GUEST_LDTR_AR_BYTES, attr);
+        break;
+    default:
+        BUG();
+    }
+}
+
 /* Make sure that xen intercepts any FP accesses from current */
 static void vmx_stts(struct vcpu *v)
 {
@@ -1160,6 +1233,7 @@ static struct hvm_function_table vmx_fun
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
+    .set_segment_register = vmx_set_segment_register,
    .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
diff -r babe17e7a4ee -r 49700bb716bb xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Mon Sep 17 13:33:09 2007 +0100
@@ -105,6 +105,8 @@ struct hvm_function_table {
     unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
+    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
+                                 struct segment_register *reg);
 
     /*
      * Re-set the value of CR3 that Xen runs on when handling VM exits.
@@ -252,6 +254,13 @@ hvm_get_segment_register(struct vcpu *v,
                          struct segment_register *reg)
 {
     hvm_funcs.get_segment_register(v, seg, reg);
+}
+
+static inline void
+hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
+                         struct segment_register *reg)
+{
+    hvm_funcs.set_segment_register(v, seg, reg);
 }
 
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
diff -r babe17e7a4ee -r 49700bb716bb xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h	Mon Sep 17 10:38:59 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h	Mon Sep 17 13:33:09 2007 +0100
@@ -28,7 +28,7 @@
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/i387.h>
 
-extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
+void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
 
 #define SVM_REG_EAX (0)
 #define SVM_REG_ECX (1)
@@ -47,4 +47,18 @@ extern void svm_dump_vmcb(const char *fr
 #define SVM_REG_R14 (14)
 #define SVM_REG_R15 (15)
 
+static inline void svm_vmload(void *vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xda" /* vmload */
+        : : "a" (__pa(vmcb)) : "memory" );
+}
+
+static inline void svm_vmsave(void *vmcb)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdb" /* vmsave */
+        : : "a" (__pa(vmcb)) : "memory" );
+}
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
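
For illustration, the new accessor pairs with the existing hvm_get_segment_register() shown above. A minimal usage sketch (hypothetical, not part of the changeset; field names follow struct segment_register as used in the patch):

/* Hypothetical example: point the current vcpu's DS at a flat 4GB
 * writable data segment using the new accessor. */
static void example_load_flat_ds(struct vcpu *v)
{
    struct segment_register reg;

    hvm_get_segment_register(v, x86_seg_ds, &reg);  /* existing accessor */
    reg.sel        = 0x10;        /* example selector value */
    reg.base       = 0;
    reg.limit      = 0xffffffff;
    reg.attr.bytes = 0xc93;       /* P=1, DPL=0, writable data, D/B=1, G=1 */
    hvm_set_segment_register(v, x86_seg_ds, &reg);  /* new accessor */
}

On VMX this lands in vmx_set_segment_register(), which requires v == current (see the ASSERT) because __vmwrite() operates on the currently loaded VMCS; on SVM, the lazily-synced registers (fs, gs, tr, ldtr) additionally round-trip through svm_sync_vmcb() and svm_vmload().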
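One detail of vmx_set_segment_register() worth noting: struct segment_register packs its attribute flags into 12 contiguous bits (type/S/DPL/P in bits 0-7, AVL/L/D-B/G in bits 8-11), whereas the VMCS access-rights encoding keeps bits 8-11 reserved, so the statement attr = ((attr & 0xf00) << 4) | (attr & 0xff); moves the top nibble up to bits 12-15. A standalone worked example of that transformation (assuming the packed layout just described):

#include <assert.h>
#include <stdint.h>

/* Expand the 12-bit packed segment attributes into the VMCS
 * access-rights layout, mirroring vmx_set_segment_register() above. */
static uint16_t packed_attr_to_vmcs_ar(uint16_t attr)
{
    return ((attr & 0xf00) << 4) | (attr & 0xff);
}

int main(void)
{
    /* 0xc93: type=3 (read/write data), S=1, DPL=0, P=1, D/B=1, G=1 */
    assert(packed_attr_to_vmcs_ar(0xc93) == 0xc093);
    /* The low byte (type/S/DPL/P) passes through untouched. */
    assert(packed_attr_to_vmcs_ar(0x09b) == 0x009b);
    return 0;
}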