
[Xen-changelog] [xen-unstable] hvm: Share ASID logic between VMX and SVM.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260281667 0
# Node ID 7f611de6b93cc4cbec0530548a85953afa845315
# Parent  2d92ad3ef517208240aa3d00e22516cf885ef351
hvm: Share ASID logic between VMX and SVM.
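
Previously, VMX allocated each vCPU a static VPID from a global bitmap at
vcpu-initialise time, while SVM already used a per-core, generation-based
ASID allocator. This changeset moves VMX onto the common allocator in
xen/arch/x86/hvm/asid.c: VPIDs are now assigned dynamically at VMENTER from
the shared hvm_vcpu.asid field, the per-vendor flush_guest_tlbs() hook
collapses into a direct call to hvm_asid_flush_core(), and VPID use is
toggled at run time by the new vmx_vmenter_helper().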

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/asid.c            |    6 +-
 xen/arch/x86/hvm/hvm.c             |    2 
 xen/arch/x86/hvm/svm/svm.c         |   13 ----
 xen/arch/x86/hvm/svm/vmcb.c        |    3 -
 xen/arch/x86/hvm/vmx/entry.S       |    2 
 xen/arch/x86/hvm/vmx/vmcs.c        |   12 +++-
 xen/arch/x86/hvm/vmx/vmx.c         |  111 +++++++++++++------------------------
 xen/include/asm-x86/hvm/asid.h     |    6 +-
 xen/include/asm-x86/hvm/hvm.h      |   18 ++----
 xen/include/asm-x86/hvm/svm/asid.h |    2 
 xen/include/asm-x86/hvm/vmx/vmcs.h |    2 
 xen/include/asm-x86/hvm/vmx/vmx.h  |   12 ----
 12 files changed, 73 insertions(+), 116 deletions(-)

diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/asid.c
--- a/xen/arch/x86/hvm/asid.c   Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/asid.c   Tue Dec 08 14:14:27 2009 +0000
@@ -20,7 +20,9 @@
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/lib.h>
-#include <xen/perfc.h>
+#include <xen/sched.h>
+#include <xen/smp.h>
+#include <xen/percpu.h>
 #include <asm/hvm/asid.h>
 
 /*
@@ -80,7 +82,7 @@ void hvm_asid_init(int nasids)
     data->next_asid = 1;
 }
 
-void hvm_asid_invalidate_asid(struct vcpu *v)
+void hvm_asid_flush_vcpu(struct vcpu *v)
 {
     v->arch.hvm_vcpu.asid_generation = 0;
 }
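
Note on the rename: hvm_asid_flush_vcpu() "flushes" a vCPU by zeroing its
generation tag, which can never match a live per-core generation, so the
vCPU is forced through re-allocation on its next VMENTER. The allocator body
itself is untouched by this patch; the following is only a sketch of how a
generation-based scheme of this shape typically works, with the per-core
structure inferred from the "data->next_asid = 1" line above (names beyond
asid, asid_generation and hvm_asid_handle_vmenter() are assumptions, not the
actual asid.c code):

    /* Assumed per-core allocator state. */
    struct hvm_asid_data {
        u32    core_asid_generation; /* bumped by hvm_asid_flush_core() */
        u32    next_asid;            /* next free tag; 0 is reserved    */
        u32    max_asid;             /* from hvm_asid_init(nasids)      */
        bool_t disabled;             /* hvm_asid_init(0): run untagged  */
    };
    static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);

    /* Returns 1 iff the caller must flush this core's TLB before
     * entering the guest (i.e. a new generation just started). */
    bool_t hvm_asid_handle_vmenter(void)
    {
        struct vcpu *curr = current;
        struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

        if ( data->disabled )
        {
            curr->arch.hvm_vcpu.asid = 0;   /* ASID 0 == untagged */
            return 0;
        }

        /* Current tag still valid for this core's generation? */
        if ( curr->arch.hvm_vcpu.asid_generation ==
             data->core_asid_generation )
            return 0;

        /* Out of tags: recycle the whole space as a new generation. */
        if ( data->next_asid > data->max_asid )
        {
            data->next_asid = 1;
            data->core_asid_generation++;
        }

        curr->arch.hvm_vcpu.asid = data->next_asid++;
        curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;

        /* First tag of a generation: old translations may linger. */
        return (curr->arch.hvm_vcpu.asid == 1);
    }
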
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Tue Dec 08 14:14:27 2009 +0000
@@ -756,6 +756,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
 
+    hvm_asid_flush_vcpu(v);
+
     if ( cpu_has_xsave )
     {
         /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
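
With the flush in common code, every vCPU leaves hvm_vcpu_initialise() with
asid_generation == 0, which matches no live core generation, so its first
VMENTER is guaranteed to allocate a fresh ASID/VPID.
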
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Dec 08 14:14:27 2009 +0000
@@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct v
         break;
     case 3:
         vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
-        hvm_asid_invalidate_asid(v);
+        hvm_asid_flush_vcpu(v);
         break;
     case 4:
         vmcb->cr4 = HVM_CR4_HOST_MASK;
@@ -453,14 +453,6 @@ static void svm_update_guest_efer(struct
     svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma);
     svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma);
     svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
-}
-
-static void svm_flush_guest_tlbs(void)
-{
-    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
-     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
-     * VMRUN anyway). */
-    hvm_asid_flush_core();
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
@@ -704,7 +696,7 @@ static void svm_do_resume(struct vcpu *v
         hvm_migrate_timers(v);
 
         /* Migrating to another ASID domain.  Request a new ASID. */
-        hvm_asid_invalidate_asid(v);
+        hvm_asid_flush_vcpu(v);
     }
 
     /* Reflect the vlapic's TPR in the hardware vtpr */
@@ -1250,7 +1242,6 @@ static struct hvm_function_table __read_
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
     .update_guest_efer    = svm_update_guest_efer,
-    .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
     .init_hypercall_page  = svm_init_hypercall_page,
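
svm_flush_guest_tlbs() is deleted rather than renamed: its body was already
just hvm_asid_flush_core(), which the common hvm_flush_guest_tlbs() (see the
hvm.h hunk below) now calls directly, so the "roll over the ASID generation"
behaviour its comment described is preserved unchanged.
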
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Dec 08 14:14:27 2009 +0000
@@ -113,9 +113,6 @@ static int construct_vmcb(struct vcpu *v
 {
     struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     struct vmcb_struct *vmcb = arch_svm->vmcb;
-
-    /* TLB control, and ASID assigment. */
-    hvm_asid_invalidate_asid(v);
 
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/vmx/entry.S
--- a/xen/arch/x86/hvm/vmx/entry.S      Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/entry.S      Tue Dec 08 14:14:27 2009 +0000
@@ -142,9 +142,9 @@ vmx_asm_do_vmentry:
         call_with_regs(vmx_enter_realmode) 
 
 .Lvmx_not_realmode:
+        call vmx_vmenter_helper
         mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
         mov  r(ax),%cr2
-        call vmx_trace_vmentry
 
         lea  UREGS_rip(r(sp)),r(di)
         mov  $GUEST_RIP,%eax
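
The renamed helper also moves above the %cr2 restore. The changeset does not
state why, but a plausible reason is that vmx_vmenter_helper() is C code,
and nothing that might fault (and thereby clobber %cr2) should run after the
guest's %cr2 value has been loaded.
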
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Dec 08 14:14:27 2009 +0000
@@ -400,9 +400,12 @@ int vmx_cpu_up(void)
         BUG();
     }
 
+    hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
+
     ept_sync_all();
 
-    vpid_sync_all();
+    if ( cpu_has_vmx_vpid )
+        vpid_sync_all();
 
     return 1;
 }
@@ -558,6 +561,9 @@ static int construct_vmcs(struct vcpu *v
         v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
 
     v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
+
+    /* Disable VPID for now: we decide when to enable it on VMENTER. */
+    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 
     if ( paging_mode_hap(d) )
     {
@@ -736,7 +742,7 @@ static int construct_vmcs(struct vcpu *v
     }
 
     if ( cpu_has_vmx_vpid )
-        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
+        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
 
     if ( cpu_has_vmx_pat && paging_mode_hap(d) )
     {
@@ -946,7 +952,7 @@ void vmx_do_resume(struct vcpu *v)
         hvm_migrate_timers(v);
         hvm_migrate_pirqs(v);
         vmx_set_host_env(v);
-        vpid_sync_vcpu_all(v);
+        hvm_asid_flush_vcpu(v);
     }
 
     debug_state = v->domain->debugger_attached;
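
Two details in the vmcs.c hunks: hvm_asid_init() is sized from the VPID
width, and VPID starts out disabled in each vCPU's secondary exec controls.
VPID is a 16-bit VMCS field, so on capable hardware the call is effectively
hvm_asid_init(1u << 16), i.e. 65536 tags per core (assuming VMCS_VPID_WIDTH
is 16, per the Intel SDM); hvm_asid_init(0) on non-VPID hardware disables
tagging outright. The SECONDARY_EXEC_ENABLE_VPID bit is then flipped at run
time by vmx_vmenter_helper() (see the vmx.c hunk below), according to
whether the vCPU currently holds a non-zero ASID.
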
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Dec 08 14:14:27 2009 +0000
@@ -60,8 +60,6 @@ static void vmx_ctxt_switch_to(struct vc
 
 static int  vmx_alloc_vlapic_mapping(struct domain *d);
 static void vmx_free_vlapic_mapping(struct domain *d);
-static int  vmx_alloc_vpid(struct vcpu *v);
-static void vmx_free_vpid(struct vcpu *v);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void vmx_update_guest_efer(struct vcpu *v);
@@ -104,9 +102,6 @@ static int vmx_vcpu_initialise(struct vc
 
     spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
 
-    if ( (rc = vmx_alloc_vpid(v)) != 0 )
-        return rc;
-
     v->arch.schedule_tail    = vmx_do_resume;
     v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
     v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
@@ -116,7 +111,6 @@ static int vmx_vcpu_initialise(struct vc
         dprintk(XENLOG_WARNING,
                 "Failed to create VMCS for vcpu %d: err=%d.\n",
                 v->vcpu_id, rc);
-        vmx_free_vpid(v);
         return rc;
     }
 
@@ -136,7 +130,6 @@ static void vmx_vcpu_destroy(struct vcpu
     vmx_destroy_vmcs(v);
     vpmu_destroy(v);
     passive_domain_destroy(v);
-    vmx_free_vpid(v);
 }
 
 #ifdef __x86_64__
@@ -1168,7 +1161,7 @@ static void vmx_update_guest_cr(struct v
         }
  
         __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
-        vpid_sync_vcpu_all(v);
+        hvm_asid_flush_vcpu(v);
         break;
     case 4:
         v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
@@ -1212,19 +1205,6 @@ static void vmx_update_guest_efer(struct
     if ( v == current )
         write_efer((read_efer() & ~EFER_SCE) |
                    (v->arch.hvm_vcpu.guest_efer & EFER_SCE));
-}
-
-static void vmx_flush_guest_tlbs(void)
-{
-    /*
-     * If VPID (i.e. tagged TLB support) is not enabled, the fact that
-     * we're in Xen at all means any guest will have a clean TLB when
-     * it's next run, because VMRESUME will flush it for us.
-     *
-     * If enabled, we invalidate all translations associated with all
-     * VPID values.
-     */
-    vpid_sync_all();
 }
 
 static void __ept_sync_domain(void *info)
@@ -1358,7 +1338,7 @@ static void vmx_set_uc_mode(struct vcpu 
     if ( paging_mode_hap(v->domain) )
         ept_change_entry_emt_with_range(
             v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
-    vpid_sync_all();
+    hvm_asid_flush_vcpu(v);
 }
 
 static void vmx_set_info_guest(struct vcpu *v)
@@ -1405,7 +1385,6 @@ static struct hvm_function_table __read_
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
     .update_guest_efer    = vmx_update_guest_efer,
-    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
     .init_hypercall_page  = vmx_init_hypercall_page,
@@ -1424,9 +1403,6 @@ static struct hvm_function_table __read_
     .set_rdtsc_exiting    = vmx_set_rdtsc_exiting
 };
 
-static unsigned long *vpid_bitmap;
-#define VPID_BITMAP_SIZE (1u << VMCS_VPID_WIDTH)
-
 void start_vmx(void)
 {
     static bool_t bootstrapped;
@@ -1460,17 +1436,6 @@ void start_vmx(void)
 
     if ( cpu_has_vmx_ept )
         vmx_function_table.hap_supported = 1;
-
-    if ( cpu_has_vmx_vpid )
-    {
-        vpid_bitmap = xmalloc_array(
-            unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
-        BUG_ON(vpid_bitmap == NULL);
-        memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));
-
-        /* VPID 0 is used by VMX root mode (the hypervisor). */
-        __set_bit(0, vpid_bitmap);
-    }
 
     setup_vmcs_dump();
 
@@ -1584,7 +1549,7 @@ static void vmx_invlpg_intercept(unsigne
 {
     struct vcpu *curr = current;
     HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
-    if ( paging_invlpg(curr, vaddr) )
+    if ( paging_invlpg(curr, vaddr) && cpu_has_vmx_vpid )
         vpid_sync_vcpu_gva(curr, vaddr);
 }
 
@@ -1929,36 +1894,6 @@ static void vmx_free_vlapic_mapping(stru
     unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
     if ( mfn != 0 )
         free_xenheap_page(mfn_to_virt(mfn));
-}
-
-static int vmx_alloc_vpid(struct vcpu *v)
-{
-    int idx;
-
-    if ( !cpu_has_vmx_vpid )
-        return 0;
-
-    do {
-        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
-        if ( idx >= VPID_BITMAP_SIZE )
-        {
-            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
-            return -EBUSY;
-        }
-    }
-    while ( test_and_set_bit(idx, vpid_bitmap) );
-
-    v->arch.hvm_vmx.vpid = idx;
-    return 0;
-}
-
-static void vmx_free_vpid(struct vcpu *v)
-{
-    if ( !cpu_has_vmx_vpid )
-        return;
-
-    if ( v->arch.hvm_vmx.vpid )
-        clear_bit(v->arch.hvm_vmx.vpid, vpid_bitmap);
 }
 
 static void vmx_install_vlapic_mapping(struct vcpu *v)
@@ -2675,8 +2610,44 @@ asmlinkage void vmx_vmexit_handler(struc
     }
 }
 
-asmlinkage void vmx_trace_vmentry(void)
-{
+asmlinkage void vmx_vmenter_helper(void)
+{
+    struct vcpu *curr = current;
+    u32 new_asid, old_asid;
+    bool_t need_flush;
+
+    if ( !cpu_has_vmx_vpid )
+        goto out;
+
+    old_asid = curr->arch.hvm_vcpu.asid;
+    need_flush = hvm_asid_handle_vmenter();
+    new_asid = curr->arch.hvm_vcpu.asid;
+
+    if ( unlikely(new_asid != old_asid) )
+    {
+        __vmwrite(VIRTUAL_PROCESSOR_ID, new_asid);
+        if ( !old_asid && new_asid )
+        {
+            /* VPID was disabled: now enabled. */
+            curr->arch.hvm_vmx.secondary_exec_control |=
+                SECONDARY_EXEC_ENABLE_VPID;
+            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                      curr->arch.hvm_vmx.secondary_exec_control);
+        }
+        else if ( old_asid && !new_asid )
+        {
+            /* VPID was enabled: now disabled. */
+            curr->arch.hvm_vmx.secondary_exec_control &=
+                ~SECONDARY_EXEC_ENABLE_VPID;
+            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+                      curr->arch.hvm_vmx.secondary_exec_control);
+        }
+    }
+
+    if ( unlikely(need_flush) )
+        vpid_sync_all();
+
+ out:
     HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 }
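
The invariant vmx_vmenter_helper() maintains: asid == 0 means "run
untagged", so SECONDARY_EXEC_ENABLE_VPID is kept exactly in step with
whether the vCPU's ASID is non-zero, and both flip together whenever the
allocator moves the vCPU between the tagged and untagged states. The
vpid_sync_all() at the end is the all-context INVVPID required when a new
ASID generation starts, since recycled tags may still have stale
translations in the TLB.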
 
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/include/asm-x86/hvm/asid.h
--- a/xen/include/asm-x86/hvm/asid.h    Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/include/asm-x86/hvm/asid.h    Tue Dec 08 14:14:27 2009 +0000
@@ -21,14 +21,14 @@
 #define __ASM_X86_HVM_ASID_H__
 
 #include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
+
+struct vcpu;
 
 /* Initialise ASID management for the current physical CPU. */
 void hvm_asid_init(int nasids);
 
 /* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
-void hvm_asid_invalidate_asid(struct vcpu *v);
+void hvm_asid_flush_vcpu(struct vcpu *v);
 
 /* Flush all ASIDs on this processor core. */
 void hvm_asid_flush_core(void);
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Dec 08 14:14:27 2009 +0000
@@ -23,6 +23,7 @@
 
 #include <asm/current.h>
 #include <asm/x86_emulate.h>
+#include <asm/hvm/asid.h>
 #include <public/domctl.h>
 #include <public/hvm/save.h>
 
@@ -99,13 +100,6 @@ struct hvm_function_table {
      */
     void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
     void (*update_guest_efer)(struct vcpu *v);
-
-    /*
-     * Called to ensure than all guest-specific mappings in a tagged TLB
-     * are flushed; does *not* flush Xen's TLB entries, and on
-     * processors without a tagged TLB it will be a noop.
-     */
-    void (*flush_guest_tlbs)(void);
 
     void (*set_tsc_offset)(struct vcpu *v, u64 offset);
 
@@ -201,11 +195,15 @@ static inline void hvm_update_guest_efer
     hvm_funcs.update_guest_efer(v);
 }
 
-static inline void 
-hvm_flush_guest_tlbs(void)
+/*
+ * Called to ensure that all guest-specific mappings in a tagged TLB are
+ * flushed; does *not* flush Xen's TLB entries, and on processors without a 
+ * tagged TLB it will be a noop.
+ */
+static inline void hvm_flush_guest_tlbs(void)
 {
     if ( hvm_enabled )
-        hvm_funcs.flush_guest_tlbs();
+        hvm_asid_flush_core();
 }
 
 void hvm_hypercall_page_initialise(struct domain *d,
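
hvm_flush_guest_tlbs() thus bypasses the function table entirely. The body
of hvm_asid_flush_core() is not part of this patch; a plausible sketch,
reusing the assumed per-core state from the asid.c sketch above, is:

    /* Sketch only: invalidate every ASID on this core by bumping the
     * generation; each vCPU re-allocates on its next VMENTER/VMRUN. */
    void hvm_asid_flush_core(void)
    {
        struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

        if ( data->disabled )
            return;

        if ( likely(++data->core_asid_generation != 0) )
            return;

        /* Generation counter wrapped: disable ASIDs for safety until
         * hvm_asid_init() next runs (assumed overflow handling). */
        data->disabled = 1;
    }
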
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h        Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/asid.h        Tue Dec 08 14:14:27 2009 +0000
@@ -41,7 +41,7 @@ static inline void svm_asid_g_invlpg(str
 #endif
 
     /* Safe fallback. Take a new ASID. */
-    hvm_asid_invalidate_asid(v);
+    hvm_asid_flush_vcpu(v);
 }
 
 #endif /* __ASM_X86_HVM_SVM_ASID_H__ */
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Dec 08 14:14:27 2009 +0000
@@ -89,8 +89,6 @@ struct arch_vmx_struct {
     /* Cache of cpu execution control. */
     u32                  exec_control;
     u32                  secondary_exec_control;
-
-    u16                  vpid;
 
     /* PMU */
     struct vpmu_struct   vpmu;
diff -r 2d92ad3ef517 -r 7f611de6b93c xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Dec 08 10:33:08 2009 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Dec 08 14:14:27 2009 +0000
@@ -314,20 +314,12 @@ void ept_sync_domain(struct domain *d);
 
 static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
 {
-    if ( cpu_has_vmx_vpid )
-        __invvpid(0, v->arch.hvm_vmx.vpid, (u64)gva);
-}
-
-static inline void vpid_sync_vcpu_all(struct vcpu *v)
-{
-    if ( cpu_has_vmx_vpid )
-        __invvpid(1, v->arch.hvm_vmx.vpid, 0);
+    __invvpid(0, v->arch.hvm_vcpu.asid, (u64)gva);
 }
 
 static inline void vpid_sync_all(void)
 {
-    if ( cpu_has_vmx_vpid )
-        __invvpid(2, 0, 0);
+    __invvpid(2, 0, 0);
 }
 
 static inline void __vmxoff(void)

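Finally, the vpid_sync_* helpers lose their internal cpu_has_vmx_vpid checks
along with the per-vCPU vpid field: they now key off the shared
hvm_vcpu.asid, and the feature test moves to those call sites that can be
reached on non-VPID hardware (as in the vmx_invlpg_intercept() hunk above).
For reference, INVVPID type 0 is individual-address invalidation and type 2
is all-context invalidation, per the Intel SDM.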