
[Xen-changelog] [xen-unstable] Nested VMX: Emulation of VMRESUME/VMLAUNCH



# HG changeset patch
# User Eddie Dong <eddie.dong@xxxxxxxxx>
# Date 1307607849 -28800
# Node ID 3b59181a388e41a225ba194a57225e335e031a17
# Parent  4dba6bd0dfadcfe165877cfc9b193cf5592050b7
Nested VMX: Emulation of VMRESUME/VMLAUNCH

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
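
For context: an L1 hypervisor drives this path with VMPTRLD followed by
VMLAUNCH for the first entry into an L2 guest, and VMRESUME for every
subsequent entry; executed by a nested guest, each of those instructions
vmexits to Xen and is now emulated by the handlers below instead of being
left in the catch-all group of unimplemented VMX instructions.  A rough
guest-side sketch (illustrative only, not part of the patch; the vmcs_pa
value and the error handling are assumptions), using the VMX convention
that a failed instruction sets CF (VMfailInvalid) or ZF (VMfailValid):

    #include <stdint.h>

    /* Hypothetical L1 guest code: make vmcs_pa the current VMCS and do the
     * first VM entry.  On success, VMLAUNCH transfers control to the L2
     * guest, so the instruction after it is only reached on VMfail (or
     * later via HOST_RIP, which a real VMM points elsewhere). */
    static int l1_first_entry(uint64_t vmcs_pa)
    {
        uint8_t fail;

        asm volatile ( "vmptrld %1; setbe %0"     /* CF or ZF => VMfail */
                       : "=qm" (fail) : "m" (vmcs_pa) : "cc", "memory" );
        if ( fail )
            return -1;

        asm volatile ( "vmlaunch; setbe %0"
                       : "=qm" (fail) : : "cc", "memory" );
        return fail ? -1 : 0;           /* later entries use vmresume */
    }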


diff -r 4dba6bd0dfad -r 3b59181a388e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
@@ -2170,6 +2170,11 @@
     /* Now enable interrupts so it's safe to take locks. */
     local_irq_enable();
 
+    /* XXX: This looks ugly, but we need a mechanism to ensure that any
+     * pending vmresume has really happened before handling this exit.
+     */
+    vcpu_nestedhvm(v).nv_vmswitch_in_progress = 0;
+
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
 
@@ -2464,10 +2469,18 @@
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMLAUNCH:
+        if ( nvmx_handle_vmlaunch(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
+    case EXIT_REASON_VMRESUME:
+        if ( nvmx_handle_vmresume(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
-    case EXIT_REASON_VMLAUNCH:
-    case EXIT_REASON_VMRESUME:
     case EXIT_REASON_GETSEC:
     case EXIT_REASON_INVEPT:
     case EXIT_REASON_INVVPID:
diff -r 4dba6bd0dfad -r 3b59181a388e xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
@@ -261,6 +261,13 @@
     }
 }
 
+static inline u32 __n2_exec_control(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    return __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL);
+}
+
 static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
@@ -486,6 +493,62 @@
     return X86EMUL_OKAY;
 }
 
+int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    int rc;
+
+    rc = vmx_inst_check_privilege(regs, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    /* Check a vVMCS is loaded and, if enabled, the I/O bitmaps are mapped */
+    if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+         ((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
+          !(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP)) )
+        nvcpu->nv_vmentry_pending = 1;
+    else
+        vmreturn(regs, VMFAIL_INVALID);
+
+    return X86EMUL_OKAY;
+}
+
+int nvmx_handle_vmresume(struct cpu_user_regs *regs)
+{
+    int launched;
+    struct vcpu *v = current;
+
+    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                           NVMX_LAUNCH_STATE);
+    if ( !launched ) {
+        vmreturn(regs, VMFAIL_VALID);
+        return X86EMUL_EXCEPTION;
+    }
+    return nvmx_vmresume(v, regs);
+}
+
+int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
+{
+    int launched;
+    int rc;
+    struct vcpu *v = current;
+
+    launched = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                           NVMX_LAUNCH_STATE);
+    if ( launched ) {
+        vmreturn(regs, VMFAIL_VALID);
+        rc = X86EMUL_EXCEPTION;
+    }
+    else {
+        rc = nvmx_vmresume(v, regs);
+        if ( rc == X86EMUL_OKAY )
+            __set_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
+                        NVMX_LAUNCH_STATE, 1);
+    }
+    return rc;
+}
+
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
diff -r 4dba6bd0dfad -r 3b59181a388e xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
@@ -158,6 +158,8 @@
 int nvmx_handle_vmclear(struct cpu_user_regs *regs);
 int nvmx_handle_vmread(struct cpu_user_regs *regs);
 int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
+int nvmx_handle_vmresume(struct cpu_user_regs *regs);
+int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
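
A note on the error reporting used by the new handlers: vmreturn(regs,
VMFAIL_INVALID) and vmreturn(regs, VMFAIL_VALID) report failure to the L1
guest through its RFLAGS, per the VMX instruction conventions: VMfailInvalid
sets CF and clears the other arithmetic flags; VMfailValid sets ZF, clears
the others, and additionally records an error number in the VM-instruction
error field.  A minimal, self-contained sketch of that flag update
(hypothetical names, not Xen's internal API; the real vmreturn() in vvmx.c
is authoritative):

    #include <stdbool.h>
    #include <stdint.h>

    #define EFLAGS_CF (1u << 0)
    #define EFLAGS_PF (1u << 2)
    #define EFLAGS_AF (1u << 4)
    #define EFLAGS_ZF (1u << 6)
    #define EFLAGS_SF (1u << 7)
    #define EFLAGS_OF (1u << 11)

    /* Return the guest RFLAGS value after a failed VMX instruction:
     * VMfailValid => ZF set, VMfailInvalid => CF set, with the remaining
     * arithmetic flags cleared in both cases. */
    static uint64_t vmfail_rflags(uint64_t rflags, bool fail_valid)
    {
        rflags &= ~(uint64_t)(EFLAGS_CF | EFLAGS_PF | EFLAGS_AF |
                              EFLAGS_ZF | EFLAGS_SF | EFLAGS_OF);
        return rflags | (fail_valid ? EFLAGS_ZF : EFLAGS_CF);
    }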
 
