
[Xen-devel] [PATCH 08/17] vmx: nest: L1 <-> L2 context switch



This patch adds the mode switch between L1 and L2; the handling of
many controls and states may need additional scrutiny.

Roughly, at virtual VMEntry time the sVMCS is loaded: L2 controls are
combined from the controls of L0 and the vVMCS, and L2 state is taken
from the vVMCS guest-state area.
At virtual VMExit, the host VMCS is loaded: L1 controls come from L0,
and L1 state is taken from the vVMCS host-state area.
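
For reference, here is a minimal standalone sketch of the control
combination described above. It is illustrative only and not part of
the patch; it just mirrors set_shadow_control() and load_l2_control()
in nest.c, with the EXIT_CONTROL_GUEST_BITS value copied from there:

    #include <stdint.h>

    /* Bits of VM_EXIT_CONTROLS that L1 is allowed to own (from the patch). */
    #define EXIT_CONTROL_GUEST_BITS \
        ((1u << 2) | (1u << 18) | (1u << 20) | (1u << 22))

    /* PIN-/CPU-based execution controls: the union of L0's and L1's bits. */
    static uint32_t l2_exec_control(uint32_t l0_ctrl, uint32_t l1_ctrl)
    {
        return l0_ctrl | l1_ctrl;
    }

    /* VM-exit controls: owned by L0 except the guest-owned bits above. */
    static uint32_t l2_exit_control(uint32_t l0_ctrl, uint32_t l1_ctrl)
    {
        return (l1_ctrl & EXIT_CONTROL_GUEST_BITS) |
               (l0_ctrl & ~EXIT_CONTROL_GUEST_BITS);
    }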

Signed-off-by: Qing He <qing.he@xxxxxxxxx>

---
 arch/x86/hvm/vmx/entry.S       |    1 
 arch/x86/hvm/vmx/nest.c        |  410 +++++++++++++++++++++++++++++++++++++++++
 arch/x86/hvm/vmx/vmcs.c        |   35 +++
 arch/x86/hvm/vmx/vmx.c         |   32 ++-
 include/asm-x86/hvm/vmx/nest.h |   14 +
 include/asm-x86/hvm/vmx/vmcs.h |    4 
 6 files changed, 490 insertions(+), 6 deletions(-)

diff -r 38a4757e94ef -r c7e763bbea63 xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/entry.S      Thu Apr 22 22:30:09 2010 +0800
@@ -123,6 +123,7 @@
 .globl vmx_asm_do_vmentry
 vmx_asm_do_vmentry:
         call vmx_intr_assist
+        call vmx_nest_switch_mode
 
         get_current(bx)
         cli
diff -r 38a4757e94ef -r c7e763bbea63 xen/arch/x86/hvm/vmx/nest.c
--- a/xen/arch/x86/hvm/vmx/nest.c       Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/nest.c       Thu Apr 22 22:30:09 2010 +0800
@@ -21,6 +21,8 @@
 
 #include <xen/config.h>
 #include <asm/types.h>
+#include <asm/paging.h>
+#include <asm/hvm/support.h>
 #include <asm/hvm/vmx/vmx.h>
 #include <asm/hvm/vmx/vvmcs.h>
 #include <asm/hvm/vmx/nest.h>
@@ -500,3 +502,411 @@
     hvm_inject_exception(TRAP_invalid_op, 0, 0);
     return X86EMUL_EXCEPTION;
 }
+
+int vmx_nest_handle_vmresume(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+
+    if ( unlikely(!nest->guest_vmxon_pa) )
+        goto invalid_op;
+
+    if ( nest->vmcs_invalid == 0 )
+        nest->vmresume_pending = 1;
+    else
+        vmreturn(regs, VMFAIL_INVALID);
+
+    return X86EMUL_OKAY;
+
+invalid_op:
+    hvm_inject_exception(TRAP_invalid_op, 0, 0);
+    return X86EMUL_EXCEPTION;
+}
+
+int vmx_nest_handle_vmlaunch(struct cpu_user_regs *regs)
+{
+    /* reuse vmresume for now */
+    return vmx_nest_handle_vmresume(regs);
+}
+
+/*
+ * Nested VMX context switch
+ */
+
+static unsigned long vmcs_gstate_field[] = {
+    /* 16 BITS */
+    GUEST_ES_SELECTOR,
+    GUEST_CS_SELECTOR,
+    GUEST_SS_SELECTOR,
+    GUEST_DS_SELECTOR,
+    GUEST_FS_SELECTOR,
+    GUEST_GS_SELECTOR,
+    GUEST_LDTR_SELECTOR,
+    GUEST_TR_SELECTOR,
+    /* 64 BITS */
+    VMCS_LINK_POINTER,
+    GUEST_IA32_DEBUGCTL,
+#ifndef CONFIG_X86_64
+    VMCS_LINK_POINTER_HIGH,
+    GUEST_IA32_DEBUGCTL_HIGH,
+#endif
+    /* 32 BITS */
+    GUEST_ES_LIMIT,
+    GUEST_CS_LIMIT,
+    GUEST_SS_LIMIT,
+    GUEST_DS_LIMIT,
+    GUEST_FS_LIMIT,
+    GUEST_GS_LIMIT,
+    GUEST_LDTR_LIMIT,
+    GUEST_TR_LIMIT,
+    GUEST_GDTR_LIMIT,
+    GUEST_IDTR_LIMIT,
+    GUEST_ES_AR_BYTES,
+    GUEST_CS_AR_BYTES,
+    GUEST_SS_AR_BYTES,
+    GUEST_DS_AR_BYTES,
+    GUEST_FS_AR_BYTES,
+    GUEST_GS_AR_BYTES,
+    GUEST_LDTR_AR_BYTES,
+    GUEST_TR_AR_BYTES,
+    GUEST_INTERRUPTIBILITY_INFO,
+    GUEST_ACTIVITY_STATE,
+    GUEST_SYSENTER_CS,
+    /* natural */
+    GUEST_ES_BASE,
+    GUEST_CS_BASE,
+    GUEST_SS_BASE,
+    GUEST_DS_BASE,
+    GUEST_FS_BASE,
+    GUEST_GS_BASE,
+    GUEST_LDTR_BASE,
+    GUEST_TR_BASE,
+    GUEST_GDTR_BASE,
+    GUEST_IDTR_BASE,
+    GUEST_DR7,
+    GUEST_RSP,
+    GUEST_RIP,
+    GUEST_RFLAGS,
+    GUEST_PENDING_DBG_EXCEPTIONS,
+    GUEST_SYSENTER_ESP,
+    GUEST_SYSENTER_EIP,
+};
+
+static unsigned long vmcs_ro_field[] = {
+    GUEST_PHYSICAL_ADDRESS,
+    VM_INSTRUCTION_ERROR,
+    VM_EXIT_REASON,
+    VM_EXIT_INTR_INFO,
+    VM_EXIT_INTR_ERROR_CODE,
+    IDT_VECTORING_INFO,
+    IDT_VECTORING_ERROR_CODE,
+    VM_EXIT_INSTRUCTION_LEN,
+    VMX_INSTRUCTION_INFO,
+    EXIT_QUALIFICATION,
+    GUEST_LINEAR_ADDRESS
+};
+
+static struct vmcs_host_to_guest {
+    unsigned long host_field;
+    unsigned long guest_field;
+} vmcs_h2g_field[] = {
+    {HOST_ES_SELECTOR, GUEST_ES_SELECTOR},
+    {HOST_CS_SELECTOR, GUEST_CS_SELECTOR},
+    {HOST_SS_SELECTOR, GUEST_SS_SELECTOR},
+    {HOST_DS_SELECTOR, GUEST_DS_SELECTOR},
+    {HOST_FS_SELECTOR, GUEST_FS_SELECTOR},
+    {HOST_GS_SELECTOR, GUEST_GS_SELECTOR},
+    {HOST_TR_SELECTOR, GUEST_TR_SELECTOR},
+    {HOST_SYSENTER_CS, GUEST_SYSENTER_CS},
+    {HOST_FS_BASE, GUEST_FS_BASE},
+    {HOST_GS_BASE, GUEST_GS_BASE},
+    {HOST_TR_BASE, GUEST_TR_BASE},
+    {HOST_GDTR_BASE, GUEST_GDTR_BASE},
+    {HOST_IDTR_BASE, GUEST_IDTR_BASE},
+    {HOST_SYSENTER_ESP, GUEST_SYSENTER_ESP},
+    {HOST_SYSENTER_EIP, GUEST_SYSENTER_EIP},
+};
+
+
+static void set_shadow_control(struct vmx_nest_struct *nest,
+                               unsigned int field,
+                               u32 host_value)
+{
+    u32 value;
+
+    value = (u32) __get_vvmcs(nest->vvmcs, field) | host_value;
+    __vmwrite(field, value);
+}
+
+void vmx_nest_update_exec_control(struct vcpu *v, unsigned long value)
+{
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+
+    set_shadow_control(nest, CPU_BASED_VM_EXEC_CONTROL, value);
+}
+
+void vmx_nest_update_secondary_exec_control(struct vcpu *v,
+                                            unsigned long value)
+{
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+
+    set_shadow_control(nest, SECONDARY_VM_EXEC_CONTROL, value);
+}
+
+void vmx_nest_update_exception_bitmap(struct vcpu *v, unsigned long value)
+{
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+
+    set_shadow_control(nest, EXCEPTION_BITMAP, value);
+}
+
+static void vvmcs_to_shadow(void *vvmcs, unsigned int field)
+{
+    u64 value;
+
+    value = __get_vvmcs(vvmcs, field);
+    __vmwrite(field, value);
+}
+
+static void vvmcs_from_shadow(void *vvmcs, unsigned int field)
+{
+    u64 value;
+    int rc;
+
+    value = __vmread_safe(field, &rc);
+    if ( !rc )
+        __set_vvmcs(vvmcs, field, value);
+}
+
+static void load_l2_control(struct vmx_nest_struct *nest)
+{
+    u32 exit_control;
+    struct vcpu *v = current;
+
+    /* PIN_BASED, CPU_BASED controls: the union of L0 & L1 */
+    set_shadow_control(nest, PIN_BASED_VM_EXEC_CONTROL,
+                       vmx_pin_based_exec_control);
+    vmx_update_cpu_exec_control(v);
+
+    /* VM_EXIT_CONTROLS: owned by L0 except bits below */
+#define EXIT_CONTROL_GUEST_BITS    ((1<<2) | (1<<18) | (1<<20) | (1<<22))
+    exit_control = __get_vvmcs(nest->vvmcs, VM_EXIT_CONTROLS) &
+                   EXIT_CONTROL_GUEST_BITS;
+    exit_control |= (vmx_vmexit_control & ~EXIT_CONTROL_GUEST_BITS);
+    __vmwrite(VM_EXIT_CONTROLS, exit_control);
+
+    /* VM_ENTRY_CONTROLS: owned by L1 */
+    vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_CONTROLS);
+
+    vmx_update_exception_bitmap(v);
+}
+
+static void load_vvmcs_guest_state(struct vmx_nest_struct *nest)
+{
+    struct vcpu *v = current;
+    int i;
+
+    /* vvmcs.gstate to svmcs.gstate */
+    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
+        vvmcs_to_shadow(nest->vvmcs, vmcs_gstate_field[i]);
+
+    hvm_set_cr0(__get_vvmcs(nest->vvmcs, GUEST_CR0));
+    hvm_set_cr4(__get_vvmcs(nest->vvmcs, GUEST_CR4));
+    hvm_set_cr3(__get_vvmcs(nest->vvmcs, GUEST_CR3));
+
+    vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_INTR_INFO);
+    vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
+    vvmcs_to_shadow(nest->vvmcs, VM_ENTRY_INSTRUCTION_LEN);
+
+    /* XXX: should refer to GUEST_HOST_MASK of both L0 and L1 */
+    vvmcs_to_shadow(nest->vvmcs, CR0_READ_SHADOW);
+    vvmcs_to_shadow(nest->vvmcs, CR4_READ_SHADOW);
+    vvmcs_to_shadow(nest->vvmcs, CR0_GUEST_HOST_MASK);
+    vvmcs_to_shadow(nest->vvmcs, CR4_GUEST_HOST_MASK);
+
+    /* TODO: PDPTRs for nested ept */
+    /* TODO: CR3 target control */
+}
+
+static void virtual_vmentry(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+#ifdef __x86_64__
+    unsigned long lm_l1, lm_l2;
+#endif
+
+    vmx_vmcs_switch_current(v, v->arch.hvm_vmx.vmcs, nest->svmcs);
+
+    v->arch.hvm_vcpu.in_nesting = 1;
+    nest->vmresume_pending = 0;
+    nest->vmresume_in_progress = 1;
+
+#ifdef __x86_64__
+    /*
+     * EFER handling:
+     * hvm_set_efer won't work if CR0.PG = 1, so we change the value
+     * directly to make hvm_long_mode_enabled(v) work in L2.
+     * An additional update_paging_modes is also needed if
+     * there is a 32/64 switch. v->arch.hvm_vcpu.guest_efer doesn't
+     * need to be saved, since its value on vmexit is determined by
+     * the L1 exit_controls.
+     */
+    lm_l1 = !!hvm_long_mode_enabled(v);
+    lm_l2 = !!(__get_vvmcs(nest->vvmcs, VM_ENTRY_CONTROLS) &
+                           VM_ENTRY_IA32E_MODE);
+
+    if ( lm_l2 )
+        v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+    else
+        v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+#endif
+
+    load_l2_control(nest);
+    load_vvmcs_guest_state(nest);
+
+#ifdef __x86_64__
+    if ( lm_l1 != lm_l2 )
+    {
+        paging_update_paging_modes(v);
+    }
+#endif
+
+    regs->rip = __get_vvmcs(nest->vvmcs, GUEST_RIP);
+    regs->rsp = __get_vvmcs(nest->vvmcs, GUEST_RSP);
+    regs->rflags = __get_vvmcs(nest->vvmcs, GUEST_RFLAGS);
+
+    /* TODO: EPT_POINTER */
+}
+
+static void sync_vvmcs_guest_state(struct vmx_nest_struct *nest)
+{
+    int i;
+    unsigned long mask;
+    unsigned long cr;
+
+    /* copy svmcs.gstate back to vvmcs.gstate */
+    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
+        vvmcs_from_shadow(nest->vvmcs, vmcs_gstate_field[i]);
+
+    /* SDM 20.6.6: L2 guest execution may change GUEST CR0/CR4 */
+    mask = __get_vvmcs(nest->vvmcs, CR0_GUEST_HOST_MASK);
+    if ( ~mask )
+    {
+        cr = __get_vvmcs(nest->vvmcs, GUEST_CR0);
+        cr = (cr & mask) | (__vmread(GUEST_CR0) & ~mask);
+        __set_vvmcs(nest->vvmcs, GUEST_CR0, cr);
+    }
+
+    mask = __get_vvmcs(nest->vvmcs, CR4_GUEST_HOST_MASK);
+    if ( ~mask )
+    {
+        cr = __get_vvmcs(nest->vvmcs, GUEST_CR4);
+        cr = (cr & mask) | (__vmread(GUEST_CR4) & ~mask);
+        __set_vvmcs(nest->vvmcs, GUEST_CR4, cr);
+    }
+
+    /* Sync CR3 if the exec control doesn't want CR3-load exiting, i.e. nested EPT */
+    if ( !(__get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL) &
+           CPU_BASED_CR3_LOAD_EXITING) )
+        vvmcs_from_shadow(nest->vvmcs, GUEST_CR3);
+}
+
+static void sync_vvmcs_ro(struct vmx_nest_struct *nest)
+{
+    int i;
+
+    for ( i = 0; i < ARRAY_SIZE(vmcs_ro_field); i++ )
+        vvmcs_from_shadow(nest->vvmcs, vmcs_ro_field[i]);
+}
+
+static void load_vvmcs_host_state(struct vmx_nest_struct *nest)
+{
+    struct vcpu *v = current;
+    int i;
+    u64 r;
+
+    for ( i = 0; i < ARRAY_SIZE(vmcs_h2g_field); i++ )
+    {
+        r = __get_vvmcs(nest->vvmcs, vmcs_h2g_field[i].host_field);
+        __vmwrite(vmcs_h2g_field[i].guest_field, r);
+    }
+
+    hvm_set_cr0(__get_vvmcs(nest->vvmcs, HOST_CR0));
+    hvm_set_cr4(__get_vvmcs(nest->vvmcs, HOST_CR4));
+    hvm_set_cr3(__get_vvmcs(nest->vvmcs, HOST_CR3));
+
+    __set_vvmcs(nest->vvmcs, VM_ENTRY_INTR_INFO, 0);
+}
+
+static void virtual_vmexit(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+#ifdef __x86_64__
+    unsigned long lm_l1, lm_l2;
+#endif
+
+    sync_vvmcs_ro(nest);
+    sync_vvmcs_guest_state(nest);
+
+    vmx_vmcs_switch_current(v, v->arch.hvm_vmx.vmcs, nest->hvmcs);
+
+    v->arch.hvm_vcpu.in_nesting = 0;
+    nest->vmexit_pending = 0;
+
+#ifdef __x86_64__
+    lm_l2 = !!hvm_long_mode_enabled(v);
+    lm_l1 = !!(__get_vvmcs(nest->vvmcs, VM_EXIT_CONTROLS) &
+                           VM_EXIT_IA32E_MODE);
+
+    if ( lm_l1 )
+        v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+    else
+        v->arch.hvm_vcpu.guest_efer &= ~(EFER_LMA | EFER_LME);
+#endif
+
+    /* TODO: can be removed? */
+    vmx_update_cpu_exec_control(v);
+    vmx_update_exception_bitmap(v);
+
+    load_vvmcs_host_state(nest);
+
+#ifdef __x86_64__
+    if ( lm_l1 != lm_l2 )
+        paging_update_paging_modes(v);
+#endif
+
+    regs->rip = __get_vvmcs(nest->vvmcs, HOST_RIP);
+    regs->rsp = __get_vvmcs(nest->vvmcs, HOST_RSP);
+    regs->rflags = __vmread(GUEST_RFLAGS);
+
+    vmreturn(regs, VMSUCCEED);
+}
+
+asmlinkage void vmx_nest_switch_mode(void)
+{
+    struct vcpu *v = current;
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+    /*
+     * A softirq may interrupt us between the handling of a virtual
+     * vmentry and the true vmentry. If, during this window, an L1
+     * virtual interrupt causes another virtual vmexit, we cannot let
+     * that happen or VM_ENTRY_INTR_INFO would be lost.
+     */
+    if ( unlikely(nest->vmresume_in_progress) )
+        return;
+
+    if ( v->arch.hvm_vcpu.in_nesting && nest->vmexit_pending )
+    {
+        local_irq_enable();
+        virtual_vmexit(regs);
+    }
+    else if ( !v->arch.hvm_vcpu.in_nesting && nest->vmresume_pending )
+    {
+        local_irq_enable();
+        virtual_vmentry(regs);
+    }
+}
diff -r 38a4757e94ef -r c7e763bbea63 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Apr 22 22:30:09 2010 +0800
@@ -542,6 +542,35 @@
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
+void vmx_vmcs_switch_current(struct vcpu *v,
+                             struct vmcs_struct *from,
+                             struct vmcs_struct *to)
+{
+    /* no foreign access */
+    if ( unlikely(v != current) )
+        return;
+
+    if ( unlikely(current->arch.hvm_vmx.vmcs != from) )
+        return;
+
+    spin_lock(&v->arch.hvm_vmx.vmcs_lock);
+
+    __vmpclear(virt_to_maddr(from));
+    __vmptrld(virt_to_maddr(to));
+
+    v->arch.hvm_vmx.vmcs = to;
+    v->arch.hvm_vmx.launched = 0;
+    this_cpu(current_vmcs) = to;
+
+    if ( v->arch.hvm_vmx.vmcs_host_updated )
+    {
+        v->arch.hvm_vmx.vmcs_host_updated = 0;
+        vmx_set_host_env(v);
+    }
+
+    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
+}
+
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr)
 {
     unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap;
@@ -976,6 +1005,12 @@
         hvm_migrate_pirqs(v);
         vmx_set_host_env(v);
         hvm_asid_flush_vcpu(v);
+
+        /*
+         * nesting: we need to do an additional host env sync if we have
+         * other VMCSes. Currently this only works with one active sVMCS.
+         */
+        v->arch.hvm_vmx.vmcs_host_updated = 1;
     }
 
     debug_state = v->domain->debugger_attached;
diff -r 38a4757e94ef -r c7e763bbea63 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 22 22:30:09 2010 +0800
@@ -392,18 +392,28 @@
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
 {
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    if ( v->arch.hvm_vcpu.in_nesting )
+        vmx_nest_update_exec_control(v, v->arch.hvm_vmx.exec_control);
+    else
+        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
 }
 
 void vmx_update_secondary_exec_control(struct vcpu *v)
 {
-    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-              v->arch.hvm_vmx.secondary_exec_control);
+    if ( v->arch.hvm_vcpu.in_nesting )
+        vmx_nest_update_secondary_exec_control(v,
+            v->arch.hvm_vmx.secondary_exec_control);
+    else
+        __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                  v->arch.hvm_vmx.secondary_exec_control);
 }
 
 void vmx_update_exception_bitmap(struct vcpu *v)
 {
-    __vmwrite(EXCEPTION_BITMAP, v->arch.hvm_vmx.exception_bitmap);
+    if ( v->arch.hvm_vcpu.in_nesting )
+        vmx_nest_update_exception_bitmap(v, v->arch.hvm_vmx.exception_bitmap);
+    else
+        __vmwrite(EXCEPTION_BITMAP, v->arch.hvm_vmx.exception_bitmap);
 }
 
 static int vmx_guest_x86_mode(struct vcpu *v)
@@ -2348,6 +2358,8 @@
     /* Now enable interrupts so it's safe to take locks. */
     local_irq_enable();
 
+    v->arch.hvm_vmx.nest.vmresume_in_progress = 0;
+
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
 
@@ -2610,6 +2622,11 @@
         if ( vmx_nest_handle_vmclear(regs) == X86EMUL_OKAY )
             __update_guest_eip(inst_len);
         break;
+    case EXIT_REASON_VMLAUNCH:
+        inst_len = __get_instruction_length();
+        if ( vmx_nest_handle_vmlaunch(regs) == X86EMUL_OKAY )
+            __update_guest_eip(inst_len);
+        break;
     case EXIT_REASON_VMPTRLD:
         inst_len = __get_instruction_length();
         if ( vmx_nest_handle_vmptrld(regs) == X86EMUL_OKAY )
@@ -2630,6 +2647,11 @@
         if ( vmx_nest_handle_vmwrite(regs) == X86EMUL_OKAY )
             __update_guest_eip(inst_len);
         break;
+    case EXIT_REASON_VMRESUME:
+        inst_len = __get_instruction_length();
+        if ( vmx_nest_handle_vmresume(regs) == X86EMUL_OKAY )
+            __update_guest_eip(inst_len);
+        break;
     case EXIT_REASON_VMXOFF:
         inst_len = __get_instruction_length();
         if ( vmx_nest_handle_vmxoff(regs) == X86EMUL_OKAY )
@@ -2643,8 +2665,6 @@
 
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
-    case EXIT_REASON_VMLAUNCH:
-    case EXIT_REASON_VMRESUME:
         vmx_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
         break;
 
diff -r 38a4757e94ef -r c7e763bbea63 xen/include/asm-x86/hvm/vmx/nest.h
--- a/xen/include/asm-x86/hvm/vmx/nest.h        Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/nest.h        Thu Apr 22 22:30:09 2010 +0800
@@ -40,8 +40,14 @@
     void                *vvmcs;
     struct vmcs_struct  *svmcs;
     int                  vmcs_invalid;
+
+    int                  vmexit_pending;
+    int                  vmresume_pending;
+    int                  vmresume_in_progress;
 };
 
+asmlinkage void vmx_nest_switch_mode(void);
+
 int vmx_nest_handle_vmxon(struct cpu_user_regs *regs);
 int vmx_nest_handle_vmxoff(struct cpu_user_regs *regs);
 
@@ -52,4 +58,12 @@
 int vmx_nest_handle_vmread(struct cpu_user_regs *regs);
 int vmx_nest_handle_vmwrite(struct cpu_user_regs *regs);
 
+int vmx_nest_handle_vmresume(struct cpu_user_regs *regs);
+int vmx_nest_handle_vmlaunch(struct cpu_user_regs *regs);
+
+void vmx_nest_update_exec_control(struct vcpu *v, unsigned long value);
+void vmx_nest_update_secondary_exec_control(struct vcpu *v,
+                                            unsigned long value);
+void vmx_nest_update_exception_bitmap(struct vcpu *v, unsigned long value);
+
 #endif /* __ASM_X86_HVM_NEST_H__ */
diff -r 38a4757e94ef -r c7e763bbea63 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Apr 22 22:30:09 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Apr 22 22:30:09 2010 +0800
@@ -98,6 +98,7 @@
 
     /* nested virtualization */
     struct vmx_nest_struct nest;
+    int                  vmcs_host_updated;
 
 #ifdef __x86_64__
     struct vmx_msr_state msr_state;
@@ -377,6 +378,9 @@
 int vmx_write_guest_msr(u32 msr, u64 val);
 int vmx_add_guest_msr(u32 msr);
 int vmx_add_host_load_msr(u32 msr);
+void vmx_vmcs_switch_current(struct vcpu *v,
+                             struct vmcs_struct *from,
+                             struct vmcs_struct *to);
 
 #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

