[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-3.4-testing] hvm: Clean up EPT/NPT 'nested page fault' handling.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1256648540 0
# Node ID 6f8d8b88edd61e1f1d6264471f6a5996028bb9c8
# Parent  48ccb3d1ebd040f990d948975e3956184f389dc4
hvm: Clean up EPT/NPT 'nested page fault' handling.

Share most of the code.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   20368:b27f85b54ecc
xen-unstable date:        Mon Oct 26 13:19:33 2009 +0000
---
 xen/arch/x86/hvm/hvm.c            |   29 ++++++++++++++
 xen/arch/x86/hvm/svm/svm.c        |   33 ++++++----------
 xen/arch/x86/hvm/vmx/vmx.c        |   75 ++++++++------------------------------
 xen/include/asm-x86/hvm/hvm.h     |    2 +
 xen/include/asm-x86/hvm/vmx/vmx.h |   29 ++------------
 5 files changed, 64 insertions(+), 104 deletions(-)

diff -r 48ccb3d1ebd0 -r 6f8d8b88edd6 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Oct 27 12:57:40 2009 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Tue Oct 27 13:02:20 2009 +0000
@@ -826,6 +826,35 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
+bool_t hvm_hap_nested_page_fault(unsigned long gfn)
+{
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
+
+    /*
+     * If this GFN is emulated MMIO or marked as read-only, pass the fault
+     * to the mmio handler.
+     */
+    if ( p2m_is_mmio(p2mt) || (p2mt == p2m_ram_ro) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 1;
+    }
+
+    /* Log-dirty: mark the page dirty and let the guest write it again */
+    if ( p2mt == p2m_ram_logdirty )
+    {
+        paging_mark_dirty(current->domain, mfn_x(mfn));
+        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+        return 1;
+    }
+
+    return 0;
+}
+
 int hvm_set_efer(uint64_t value)
 {
     struct vcpu *v = current;
diff -r 48ccb3d1ebd0 -r 6f8d8b88edd6 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Oct 27 12:57:40 2009 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Oct 27 13:02:20 2009 +0000
@@ -909,27 +909,20 @@ void start_svm(struct cpuinfo_x86 *c)
     hvm_enable(&svm_function_table);
 }
 
-static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
-{
+static void svm_do_nested_pgfault(paddr_t gpa)
+{
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+    mfn_t mfn;
     p2m_type_t p2mt;
-    mfn_t mfn;
-    unsigned long gfn = gpa >> PAGE_SHIFT;
-
-    /*
-     * If this GFN is emulated MMIO or marked as read-only, pass the fault
-     * to the mmio handler.
-     */
+
+    if ( hvm_hap_nested_page_fault(gfn) )
+        return;
+
+    /* Everything else is an error. */
     mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
-    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
-    {
-        if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
-        return;
-    }
-
-    /* Log-dirty: mark the page dirty and let the guest write it again */
-    paging_mark_dirty(current->domain, mfn_x(mfn));
-    p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+    gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
+             gpa, mfn_x(mfn), p2mt);
+    domain_crash(current->domain);
 }
 
 static void svm_fpu_dirty_intercept(void)
@@ -1428,7 +1421,7 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_NPF:
         perfc_incra(svmexits, VMEXIT_NPF_PERFC);
         regs->error_code = vmcb->exitinfo1;
-        svm_do_nested_pgfault(vmcb->exitinfo2, regs);
+        svm_do_nested_pgfault(vmcb->exitinfo2);
         break;
 
     case VMEXIT_IRET:
diff -r 48ccb3d1ebd0 -r 6f8d8b88edd6 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Oct 27 12:57:40 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Oct 27 13:02:20 2009 +0000
@@ -2123,50 +2123,16 @@ static void vmx_wbinvd_intercept(void)
 
 static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
 {
-    unsigned long gla_validity = qualification & EPT_GLA_VALIDITY_MASK;
-    struct domain *d = current->domain;
     unsigned long gla, gfn = gpa >> PAGE_SHIFT;
     mfn_t mfn;
-    p2m_type_t t;
-
-    mfn = gfn_to_mfn_guest(d, gfn, &t);
-
-    /* There are three legitimate reasons for taking an EPT violation. 
-     * One is a guest access to MMIO space. */
-    if ( gla_validity == EPT_GLA_VALIDITY_MATCH && p2m_is_mmio(t) )
-    {
-        handle_mmio();
+    p2m_type_t p2mt;
+
+    if ( (qualification & EPT_GLA_VALID) &&
+         hvm_hap_nested_page_fault(gfn) )
         return;
-    }
-
-    /* The second is log-dirty mode, writing to a read-only page;
-     * The third is populating a populate-on-demand page. */
-    if ( (gla_validity == EPT_GLA_VALIDITY_MATCH
-          || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
-         && p2m_is_ram(t) && (t != p2m_ram_ro) )
-    {
-        if ( paging_mode_log_dirty(d) )
-        {
-            paging_mark_dirty(d, mfn_x(mfn));
-            p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
-            flush_tlb_mask(d->domain_dirty_cpumask);
-        }
-        return;
-    }
-
-    /* Ignore writes to:
-     *     1. read only memory regions;
-     *     2. memory holes. */
-    if ( (qualification & EPT_WRITE_VIOLATION)
-         && (((gla_validity == EPT_GLA_VALIDITY_MATCH) && (t == p2m_ram_ro))
-             || (mfn_x(mfn) == INVALID_MFN)) ) {
-        int inst_len = __get_instruction_length();
-        __update_guest_eip(inst_len);
-        return;
-    }
 
     /* Everything else is an error. */
-    gla = __vmread(GUEST_LINEAR_ADDRESS);
+    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
@@ -2176,29 +2142,20 @@ static void ept_handle_violation(unsigne
              (qualification & EPT_EFFECTIVE_READ) ? 'r' : '-',
              (qualification & EPT_EFFECTIVE_WRITE) ? 'w' : '-',
              (qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
-             gpa, mfn_x(mfn), t);
+             gpa, mfn_x(mfn), p2mt);
+
+    if ( qualification & EPT_GLA_VALID )
+    {
+        gla = __vmread(GUEST_LINEAR_ADDRESS);
+        gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
+    }
 
     if ( qualification & EPT_GAW_VIOLATION )
         gdprintk(XENLOG_ERR, " --- GPA too wide (max %u bits)\n", 
-                 9 * (unsigned) d->arch.hvm_domain.vmx.ept_control.gaw + 21);
-
-    switch ( gla_validity )
-    {
-    case EPT_GLA_VALIDITY_PDPTR_LOAD:
-        gdprintk(XENLOG_ERR, " --- PDPTR load failed\n"); 
-        break;
-    case EPT_GLA_VALIDITY_GPT_WALK:
-        gdprintk(XENLOG_ERR, " --- guest PT walk to %#lx failed\n", gla);
-        break;
-    case EPT_GLA_VALIDITY_RSVD:
-        gdprintk(XENLOG_ERR, " --- GLA_validity 2 (reserved)\n");
-        break;
-    case EPT_GLA_VALIDITY_MATCH:
-        gdprintk(XENLOG_ERR, " --- guest access to %#lx failed\n", gla);
-        break;
-    }
-
-    domain_crash(d);
+                 9 * (unsigned int)current->domain->arch.hvm_domain.
+                 vmx.ept_control.gaw + 21);
+
+    domain_crash(current->domain);
 }
 
 static void vmx_failed_vmentry(unsigned int exit_reason,
diff -r 48ccb3d1ebd0 -r 6f8d8b88edd6 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Oct 27 12:57:40 2009 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Oct 27 13:02:20 2009 +0000
@@ -323,4 +323,6 @@ static inline void hvm_set_info_guest(st
 
 int hvm_debug_op(struct vcpu *v, int32_t op);
 
+bool_t hvm_hap_nested_page_fault(unsigned long gfn);
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r 48ccb3d1ebd0 -r 6f8d8b88edd6 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Oct 27 12:57:40 2009 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Oct 27 13:02:20 2009 +0000
@@ -365,45 +365,24 @@ void ept_p2m_init(struct domain *d);
 void ept_p2m_init(struct domain *d);
 
 /* EPT violation qualifications definitions */
-/* bit offset 0 in exit qualification */
 #define _EPT_READ_VIOLATION         0
 #define EPT_READ_VIOLATION          (1UL<<_EPT_READ_VIOLATION)
-/* bit offset 1 in exit qualification */
 #define _EPT_WRITE_VIOLATION        1
 #define EPT_WRITE_VIOLATION         (1UL<<_EPT_WRITE_VIOLATION)
-/* bit offset 2 in exit qualification */
 #define _EPT_EXEC_VIOLATION         2
 #define EPT_EXEC_VIOLATION          (1UL<<_EPT_EXEC_VIOLATION)
-
-/* bit offset 3 in exit qualification */
 #define _EPT_EFFECTIVE_READ         3
 #define EPT_EFFECTIVE_READ          (1UL<<_EPT_EFFECTIVE_READ)
-/* bit offset 4 in exit qualification */
 #define _EPT_EFFECTIVE_WRITE        4
 #define EPT_EFFECTIVE_WRITE         (1UL<<_EPT_EFFECTIVE_WRITE)
-/* bit offset 5 in exit qualification */
 #define _EPT_EFFECTIVE_EXEC         5
 #define EPT_EFFECTIVE_EXEC          (1UL<<_EPT_EFFECTIVE_EXEC)
-
-/* bit offset 6 in exit qualification */
 #define _EPT_GAW_VIOLATION          6
 #define EPT_GAW_VIOLATION           (1UL<<_EPT_GAW_VIOLATION)
-
-/* bits offset 7 & 8 in exit qualification */
-#define _EPT_GLA_VALIDITY           7
-#define EPT_GLA_VALIDITY_MASK       (3UL<<_EPT_GLA_VALIDITY)
-/* gla != gpa, when load PDPTR */
-#define EPT_GLA_VALIDITY_PDPTR_LOAD (0UL<<_EPT_GLA_VALIDITY)
-/* gla != gpa, during guest page table walking */
-#define EPT_GLA_VALIDITY_GPT_WALK   (1UL<<_EPT_GLA_VALIDITY)
-/* reserved */
-#define EPT_GLA_VALIDITY_RSVD       (2UL<<_EPT_GLA_VALIDITY)
-/* gla == gpa, normal case */
-#define EPT_GLA_VALIDITY_MATCH      (3UL<<_EPT_GLA_VALIDITY)
-
-#define EPT_EFFECTIVE_MASK          (EPT_EFFECTIVE_READ  |  \
-                                     EPT_EFFECTIVE_WRITE |  \
-                                     EPT_EFFECTIVE_EXEC)
+#define _EPT_GLA_VALID              7
+#define EPT_GLA_VALID               (1UL<<_EPT_GLA_VALID)
+#define _EPT_GLA_FAULT              8
+#define EPT_GLA_FAULT               (1UL<<_EPT_GLA_FAULT)
 
 #define EPT_PAGETABLE_ENTRIES       512
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, which monitors our
servers 24x7x365 and backs them with Rackspace's Fanatical Support®.