
[Xen-devel] [PATCH 12/16] vmx: nest: VMExit handler in L2



Handle VM exits that occur while the guest runs in L2 context. Each exit
is either handled by L0 on L2's behalf or reflected to L1, depending on
the exit reason and on the execution controls L1 programmed into its
virtual VMCS.

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

---
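The handler sorts exits into three buckets: exits L0 always handles on
L2's behalf (EPT violation/misconfig, external interrupt, WBINVD), exits
that are always reflected to L1 (failed vmentry, triple fault, task
switch, I/O, CPUID, MSR accesses, VMCALL and the VMX instructions), and
exits reflected to L1 only when L1 enabled the corresponding CPU-based
execution control. The repeated cases in nest.c below all follow the
pattern of this sketch (the helper is illustrative only, not part of the
patch):

    /*
     * Illustrative only: mark an exit for reflection to L1 iff L1
     * enabled the matching CPU-based execution control in its
     * virtual VMCS.
     */
    static void nest_reflect_if_enabled(struct vmx_nest_struct *nest,
                                        u32 ctrl_bit)
    {
        u32 ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);

        if ( ctrl & ctrl_bit )
            nest->vmexit_pending = 1;
    }

e.g. the EXIT_REASON_HLT case is equivalent to
nest_reflect_if_enabled(nest, CPU_BASED_HLT_EXITING).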

diff -r 7a9edf7654ad xen/arch/x86/hvm/vmx/nest.c
--- a/xen/arch/x86/hvm/vmx/nest.c       Wed Sep 08 22:14:26 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/nest.c       Wed Sep 08 22:15:00 2010 +0800
@@ -1109,3 +1109,237 @@
 
     /* TODO: NMI */
 }
+
+/*
+ * L2 VMExit handling
+ */
+
+int vmx_nest_l2_vmexit_handler(struct cpu_user_regs *regs,
+                               unsigned int exit_reason)
+{
+    struct vcpu *v = current;
+    struct vmx_nest_struct *nest = &v->arch.hvm_vmx.nest;
+    u32 ctrl;
+    int bypass_l0 = 0;
+
+    nest->vmexit_pending = 0;
+    nest->intr_info = 0;
+    nest->error_code = 0;
+
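+    /*
+     * Default disposition: L0 handles the exit itself; the cases below
+     * set vmexit_pending when the exit must instead be reflected to L1.
+     */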
+    switch ( exit_reason )
+    {
+    case EXIT_REASON_EXCEPTION_NMI:
+    {
+        u32 intr_info = __vmread(VM_EXIT_INTR_INFO);
+        u32 valid_mask = (X86_EVENTTYPE_HW_EXCEPTION << 8) |
+                         INTR_INFO_VALID_MASK;
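+        /*
+         * VM_EXIT_INTR_INFO layout (Intel SDM): bits 7:0 vector,
+         * bits 10:8 event type (3 == hardware exception), bit 31 valid.
+         */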
+        u64 exec_bitmap;
+        int vector = intr_info & INTR_INFO_VECTOR_MASK;
+
+        /*
+         * Decided by the L0 and L1 exception bitmaps: if the vector is
+         * set in both, L0 has priority on #PF, L1 on other vectors.
+         */
+        if ( vector == TRAP_page_fault )
+        {
+            if ( paging_mode_hap(v->domain) )
+                nest->vmexit_pending = 1;
+        }
+        else if ( (intr_info & valid_mask) == valid_mask )
+        {
+            exec_bitmap = __get_vvmcs(nest->vvmcs, EXCEPTION_BITMAP);
+
+            if ( exec_bitmap & (1 << vector) )
+                nest->vmexit_pending = 1;
+        }
+        break;
+    }
+
+    case EXIT_REASON_WBINVD:
+    case EXIT_REASON_EPT_VIOLATION:
+    case EXIT_REASON_EPT_MISCONFIG:
+    case EXIT_REASON_EXTERNAL_INTERRUPT:
+        /* pass to L0 handler */
+        break;
+
+    case VMX_EXIT_REASONS_FAILED_VMENTRY:
+    case EXIT_REASON_TRIPLE_FAULT:
+    case EXIT_REASON_TASK_SWITCH:
+    case EXIT_REASON_IO_INSTRUCTION:
+    case EXIT_REASON_CPUID:
+    case EXIT_REASON_MSR_READ:
+    case EXIT_REASON_MSR_WRITE:
+    case EXIT_REASON_VMCALL:
+    case EXIT_REASON_VMCLEAR:
+    case EXIT_REASON_VMLAUNCH:
+    case EXIT_REASON_VMPTRLD:
+    case EXIT_REASON_VMPTRST:
+    case EXIT_REASON_VMREAD:
+    case EXIT_REASON_VMRESUME:
+    case EXIT_REASON_VMWRITE:
+    case EXIT_REASON_VMXOFF:
+    case EXIT_REASON_VMXON:
+    case EXIT_REASON_INVEPT:
+        /* inject to L1 */
+        nest->vmexit_pending = 1;
+        break;
+
+    case EXIT_REASON_PENDING_VIRT_INTR:
+    {
+        ctrl = v->arch.hvm_vmx.exec_control;
+
+        /*
+         * If both L0 and L1 have the intr/nmi window open, L0 has
+         * priority.
+         *
+         * Note that this is not strictly correct: in L2 context, L0's
+         * intr/nmi window flag should be replaced with MTF, causing an
+         * immediate VMExit, but MTF may not be available on all hardware.
+         */
+        if ( !(ctrl & CPU_BASED_VIRTUAL_INTR_PENDING) )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+    case EXIT_REASON_PENDING_VIRT_NMI:
+    {
+        ctrl = v->arch.hvm_vmx.exec_control;
+
+        if ( !(ctrl & CPU_BASED_VIRTUAL_NMI_PENDING) )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    /* Whether the following exits reflect to L1 is decided by L1's controls */
+    case EXIT_REASON_HLT:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_HLT_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_RDTSC:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_RDTSC_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_RDPMC:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_RDPMC_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_MWAIT_INSTRUCTION:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_MWAIT_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_PAUSE_INSTRUCTION:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_PAUSE_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_MONITOR_INSTRUCTION:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_MONITOR_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_DR_ACCESS:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_MOV_DR_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_INVLPG:
+    {
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( ctrl & CPU_BASED_INVLPG_EXITING )
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_CR_ACCESS:
+    {
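+        /*
+         * Exit qualification (Intel SDM): bits 3:0 = CR number, bits 5:4
+         * = access type (0 MOV to CR, 1 MOV from CR, 2 CLTS, 3 LMSW).
+         */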
+        u64 exit_qualification = __vmread(EXIT_QUALIFICATION);
+        int cr = exit_qualification & 15;
+        int write = (exit_qualification >> 4) & 3;
+        u32 mask = 0;
+
+        /* as above, whether to reflect depends on L1's exec_control */
+        ctrl = __get_vvmcs(nest->vvmcs, CPU_BASED_VM_EXEC_CONTROL);
+
+        if ( cr == 3 )
+        {
+            mask = write ? CPU_BASED_CR3_STORE_EXITING
+                         : CPU_BASED_CR3_LOAD_EXITING;
+            if ( ctrl & mask )
+                nest->vmexit_pending = 1;
+        }
+        else if ( cr == 8 )
+        {
+            mask = write ? CPU_BASED_CR8_STORE_EXITING
+                         : CPU_BASED_CR8_LOAD_EXITING;
+            if ( ctrl & mask )
+                nest->vmexit_pending = 1;
+        }
+        else  /* CR0, CR4, CLTS, LMSW */
+            nest->vmexit_pending = 1;
+
+        break;
+    }
+    default:
+        gdprintk(XENLOG_WARNING, "Unknown nested vmexit reason %x.\n",
+                 exit_reason);
+    }
+
+    if ( nest->vmexit_pending )
+        bypass_l0 = 1;
+
+    return bypass_l0;
+}
diff -r 7a9edf7654ad xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Sep 08 22:14:26 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Sep 08 22:15:00 2010 +0800
@@ -2373,6 +2373,16 @@
      * any pending vmresume has really happened
      */
     v->arch.hvm_vmx.nest.vmresume_in_progress = 0;
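+
+    /*
+     * While in L2, give the nested exit handler first look: if the exit
+     * is to be reflected to L1, skip L0's normal handling below.
+     */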
+    if ( v->arch.hvm_vcpu.in_nesting )
+    {
+        if ( vmx_nest_l2_vmexit_handler(regs, exit_reason) )
+            goto out;
+    }
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
@@ -2745,6 +2750,7 @@
         break;
     }
 
+out:
     if ( v->arch.hvm_vcpu.in_nesting )
         vmx_nest_idtv_handling();
 }
diff -r 7a9edf7654ad xen/include/asm-x86/hvm/vmx/nest.h
--- a/xen/include/asm-x86/hvm/vmx/nest.h        Wed Sep 08 22:14:26 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/nest.h        Wed Sep 08 22:15:00 2010 +0800
@@ -81,4 +81,7 @@
 
 void vmx_nest_idtv_handling(void);
 
+int vmx_nest_l2_vmexit_handler(struct cpu_user_regs *regs,
+                               unsigned int exit_reason);
+
 #endif /* __ASM_X86_HVM_NEST_H__ */
diff -r 7a9edf7654ad xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Sep 08 22:14:26 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Sep 08 22:15:00 2010 +0800
@@ -112,6 +112,7 @@
 #define EXIT_REASON_APIC_ACCESS         44
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
+#define EXIT_REASON_INVEPT              50
 #define EXIT_REASON_RDTSCP              51
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
