
[Xen-changelog] [xen-unstable] hvm: Pull SVM ASID management into common HVM code where it can be shared.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1260268388 0
# Node ID 2d92ad3ef517208240aa3d00e22516cf885ef351
# Parent  3122518646d3ac44e4d0c76ac83228acc4d31dbe
hvm: Pull SVM ASID management into common HVM code where it can be shared.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/Makefile          |    1 
 xen/arch/x86/hvm/asid.c            |  150 +++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/asid.c        |  204 ++-----------------------------------
 xen/arch/x86/hvm/svm/svm.c         |    6 -
 xen/arch/x86/hvm/svm/vmcb.c        |    2 
 xen/include/asm-x86/hvm/asid.h     |   50 +++++++++
 xen/include/asm-x86/hvm/svm/asid.h |    6 -
 xen/include/asm-x86/hvm/svm/vmcb.h |    1 
 xen/include/asm-x86/hvm/vcpu.h     |    3 
 9 files changed, 224 insertions(+), 199 deletions(-)
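
In outline, the allocator being centralized here is small. The following
standalone sketch (plain C with hypothetical names, not Xen code) mirrors
the logic of hvm_asid_handle_vmenter() in the new xen/arch/x86/hvm/asid.c
below:

    /* Generation-tagged round-robin ASID allocation, distilled. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ASID 64u                  /* assumed hardware ASID count */

    static uint64_t core_generation = 1;  /* 0 means "invalid generation" */
    static uint32_t next_asid = 1;        /* 0 means "ASIDs disabled" */

    struct vcpu_tag { uint64_t generation; uint32_t asid; };

    /* Returns 1 if the TLB must be flushed before entering the guest. */
    static int handle_vmenter(struct vcpu_tag *v)
    {
        if (v->generation == core_generation)
            return 0;                     /* current ASID is still valid */
        if (next_asid > MAX_ASID) {       /* generation exhausted: roll over */
            core_generation++;
            next_asid = 1;
        }
        v->asid = next_asid++;
        v->generation = core_generation;
        return v->asid == 1;              /* first ASID of a new generation */
    }

    int main(void)
    {
        struct vcpu_tag v = { 0, 0 };     /* starts invalidated */
        printf("flush=%d asid=%u\n", handle_vmenter(&v), v.asid);
        return 0;
    }

The 2^80-cycle overflow figure quoted in the comments presumably combines
the 2^64 possible generations with a minimum of roughly 2^16 cycles to
exhaust one generation, since consuming each of the few dozen ASIDs costs
at least one VMRUN.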

diff -r 3122518646d3 -r 2d92ad3ef517 xen/arch/x86/hvm/Makefile
--- a/xen/arch/x86/hvm/Makefile Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/arch/x86/hvm/Makefile Tue Dec 08 10:33:08 2009 +0000
@@ -1,6 +1,7 @@ subdir-y += svm
 subdir-y += svm
 subdir-y += vmx
 
+obj-y += asid.o
 obj-y += emulate.o
 obj-y += hpet.o
 obj-y += hvm.o
diff -r 3122518646d3 -r 2d92ad3ef517 xen/arch/x86/hvm/asid.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/asid.c   Tue Dec 08 10:33:08 2009 +0000
@@ -0,0 +1,150 @@
+/*
+ * asid.c: ASID management
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ * Copyright (c) 2009, Citrix Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/perfc.h>
+#include <asm/hvm/asid.h>
+
+/*
+ * ASIDs partition the physical TLB.  In the current implementation ASIDs are
+ * introduced to reduce the number of TLB flushes.  Each time the guest's
+ * virtual address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4}
+ * operation), instead of flushing the TLB, a new ASID is assigned.  This
+ * reduces the number of TLB flushes to at most 1/#ASIDs.  The biggest
+ * advantage is that hot parts of the hypervisor's code and data remain in
+ * the TLB.
+ *
+ * Sketch of the Implementation:
+ *
+ * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
+ * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
+ * ASID invalidation at the time of a TLB flush, ASIDs are tagged with a
+ * 64-bit generation.  Only on a generation overflow does the code need to
+ * invalidate all ASID information stored in the VCPUs that run on the
+ * specific physical processor.  This overflow occurs only after about 2^80
+ * host processor cycles, so we do not optimize this case, but simply disable
+ * ASID usage to retain correctness.
+ */
+
+/* Per-CPU ASID management. */
+struct hvm_asid_data {
+   u64 core_asid_generation;
+   u32 next_asid;
+   u32 max_asid;
+   bool_t disabled;
+   bool_t initialised;
+};
+
+static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
+
+void hvm_asid_init(int nasids)
+{
+    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
+
+    /*
+     * If already initialised, we just bump the generation to force a TLB
+     * flush. Resetting the generation could be dangerous, if VCPUs still
+     * exist that reference earlier generations on this CPU.
+     */
+    if ( test_and_set_bool(data->initialised) )
+        return hvm_asid_flush_core();
+
+    data->max_asid = nasids - 1;
+    data->disabled = (nasids <= 1);
+
+    printk("HVM: ASIDs %s \n",
+           (data->disabled ? "disabled." : "enabled."));
+
+    /* Zero indicates 'invalid generation', so we start the count at one. */
+    data->core_asid_generation = 1;
+
+    /* Zero indicates 'ASIDs disabled', so we start the count at one. */
+    data->next_asid = 1;
+}
+
+void hvm_asid_invalidate_asid(struct vcpu *v)
+{
+    v->arch.hvm_vcpu.asid_generation = 0;
+}
+
+void hvm_asid_flush_core(void)
+{
+    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
+
+    if ( data->disabled )
+        return;
+
+    if ( likely(++data->core_asid_generation != 0) )
+    {
+        data->next_asid = 1;
+        return;
+    }
+
+    /*
+     * ASID generations are 64 bit.  Overflow of generations never happens.
+     * For safety, we simply disable ASIDs, so correctness is established; it
+     * only runs a bit slower.
+     */
+    printk("HVM: ASID generation overrun. Disabling ASIDs.\n");
+    data->disabled = 1;
+}
+
+bool_t hvm_asid_handle_vmenter(void)
+{
+    struct vcpu *curr = current;
+    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
+
+    /* On erratum #170 systems we must flush the TLB.
+     * Generation overruns are handled here, too. */
+    if ( data->disabled )
+    {
+        curr->arch.hvm_vcpu.asid = 0;
+        return 0;
+    }
+
+    /* Test if VCPU has valid ASID. */
+    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
+        return 0;
+
+    /* If there are no free ASIDs, we need to move to a new generation. */
+    if ( unlikely(data->next_asid > data->max_asid) )
+        hvm_asid_flush_core();
+
+    /* Now guaranteed to be a free ASID. */
+    curr->arch.hvm_vcpu.asid = data->next_asid++;
+    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
+
+    /*
+     * When we assign ASID 1, flush all TLB entries as we are starting a new
+     * generation, and all old ASID allocations are now stale.
+     */
+    return (curr->arch.hvm_vcpu.asid == 1);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
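
The point of lifting this allocator out of svm/, per the changeset title, is
that other vendor paths can share it. Purely as a hypothetical sketch of such
a consumer (this patch adds no VMX caller, and vmx_set_vpid() and
vmx_flush_all_vpids() are invented names), a VPID-based port might look like:

    asmlinkage void vmx_vpid_handle_vmentry(void)
    {
        struct vcpu *curr = current;
        bool_t need_flush = hvm_asid_handle_vmenter();

        /* ASID 0 means the common allocator is disabled: fall back to
         * flushing unconditionally on every entry. */
        if ( curr->arch.hvm_vcpu.asid == 0 )
        {
            vmx_flush_all_vpids();
            return;
        }

        vmx_set_vpid(curr, curr->arch.hvm_vcpu.asid);
        if ( need_flush )
            vmx_flush_all_vpids();
    }
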
diff -r 3122518646d3 -r 2d92ad3ef517 xen/arch/x86/hvm/svm/asid.c
--- a/xen/arch/x86/hvm/svm/asid.c       Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/arch/x86/hvm/svm/asid.c       Tue Dec 08 10:33:08 2009 +0000
@@ -22,164 +22,16 @@
 #include <xen/perfc.h>
 #include <asm/hvm/svm/asid.h>
 
-/*
- * This is the interface to SVM's ASID management.  ASIDs partition the
- * physical TLB for SVM.  In the current implementation ASIDs are introduced
- * to reduce the number of TLB flushes.  Each time the guest's virtual
- * address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4} operation),
- * instead of flushing the TLB, a new ASID is assigned.  This reduces the
- * number of TLB flushes to at most 1/#ASIDs (currently 1/64).  The biggest
- * advantage is that hot parts of the hypervisor's code and data retain in
- * the TLB.
- *
- * Sketch of the Implementation:
- *
- * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
- * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
- * ASID invalidation, at the time of a TLB flush,  ASIDs are tagged with a
- * 64-bit generation.  Only on a generation overflow the code needs to
- * invalidate all ASID information stored at the VCPUs with are run on the
- * specific physical processor.  This overflow appears after about 2^80
- * host processor cycles, so we do not optimize this case, but simply disable
- * ASID useage to retain correctness.
- */
-
-/* usable guest asids  [ 1 .. get_max_asid() ) */
-#define SVM_ASID_FIRST_GUEST_ASID       1
-
-#define SVM_ASID_FIRST_GENERATION       0
-
-/* triggers the flush of all generations on all VCPUs */
-#define SVM_ASID_LAST_GENERATION        (0xfffffffffffffffd)
-
-/* triggers assignment of new ASID to a VCPU */
-#define SVM_ASID_INVALID_GENERATION     (SVM_ASID_LAST_GENERATION + 1)
-
-/* Per-CPU ASID management. */
-struct svm_asid_data {
-   u64 core_asid_generation;
-   u32 next_asid;
-   u32 max_asid;
-   u32 erratum170:1;
-   u32 initialised:1;
-};
-
-static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
-
-/*
- * Get handle to CPU-local ASID management data.
- */
-static struct svm_asid_data *svm_asid_core_data(void)
-{
-    return &this_cpu(svm_asid_data);
-}
-
-/*
- * Init ASID management for the current physical CPU.
- */
 void svm_asid_init(struct cpuinfo_x86 *c)
 {
-    int nasids;
-    struct svm_asid_data *data = svm_asid_core_data();
+    int nasids = 0;
 
-    /*
-     * If already initialised, we just bump the generation to force a TLB
-     * flush. Resetting the generation could be dangerous, if VCPUs still
-     * exist that reference earlier generations on this CPU.
-     */
-    if ( data->initialised )
-        return svm_asid_inc_generation();
-    data->initialised = 1;
+    /* Check for erratum #170, and leave ASIDs disabled if it's present. */
+    if ( (c->x86 == 0x10) ||
+         ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)) )
+        nasids = cpuid_ebx(0x8000000A);
 
-    /* Find #ASID. */
-    nasids = cpuid_ebx(0x8000000A);
-    data->max_asid = nasids - 1;
-
-    /* Check if we can use ASIDs. */
-    data->erratum170 =
-        !((c->x86 == 0x10) ||
-          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));
-
-    printk("AMD SVM: ASIDs %s \n",
-           (data->erratum170 ? "disabled." : "enabled."));
-
-    /* Initialize ASID assigment. */
-    if ( data->erratum170 )
-    {
-        /* On errata #170, VCPUs and phys processors should have same
-          generation.  We set both to invalid. */
-        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
-    }
-    else
-    {
-        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
-    }
-
-    /* ASIDs are assigned round-robin.  Start with the first. */
-    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
-}
-
-/*
- * Force VCPU to fetch a new ASID.
- */
-void svm_asid_init_vcpu(struct vcpu *v)
-{
-    struct svm_asid_data *data = svm_asid_core_data();
-
-    /* Trigger asignment of a new ASID. */
-    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
-
-    /*
-     * This erratum is bound to a physical processor.  The tlb_control
-     * field is not changed by the processor.  We only set tlb_control
-     * on VMCB creation and on a migration.
-     */
-    if ( data->erratum170 )
-    {
-        /* Flush TLB every VMRUN to handle Errata #170. */
-        v->arch.hvm_svm.vmcb->tlb_control = 1;
-        /* All guests use same ASID. */
-        v->arch.hvm_svm.vmcb->guest_asid  = 1;
-    }
-    else
-    {
-        /* These fields are handled on VMRUN */
-        v->arch.hvm_svm.vmcb->tlb_control = 0;
-        v->arch.hvm_svm.vmcb->guest_asid  = 0;
-    }
-}
-
-/*
- * Increase the Generation to make free ASIDs, and indirectly cause a 
- * TLB flush of all ASIDs on the next vmrun.
- */
-void svm_asid_inc_generation(void)
-{
-    struct svm_asid_data *data = svm_asid_core_data();
-
-    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
-    {
-        /* Move to the next generation.  We can't flush the TLB now
-         * because you need to vmrun to do that, and current might not
-         * be a HVM vcpu, but the first HVM vcpu that runs after this 
-         * will pick up ASID 1 and flush the TLBs. */
-        data->core_asid_generation++;
-        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
-        return;
-    }
-
-    /*
-     * ASID generations are 64 bit.  Overflow of generations never happens.
-     * For safety, we simply disable ASIDs and switch to erratum #170 mode on
-     * this core (flushing TLB always). So correctness is established; it
-     * only runs a bit slower.
-     */
-    if ( !data->erratum170 )
-    {
-        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
-        data->erratum170 = 1;
-        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
-    }
+    hvm_asid_init(nasids);
 }
 
 /*
@@ -188,47 +40,19 @@ void svm_asid_inc_generation(void)
  */
 asmlinkage void svm_asid_handle_vmrun(void)
 {
-    struct vcpu *v = current;
-    struct svm_asid_data *data = svm_asid_core_data();
+    struct vcpu *curr = current;
+    bool_t need_flush = hvm_asid_handle_vmenter();
 
-    /* On erratum #170 systems we must flush the TLB. 
-     * Generation overruns are taken here, too. */
-    if ( data->erratum170 )
+    /* ASID 0 indicates that ASIDs are disabled. */
+    if ( curr->arch.hvm_vcpu.asid == 0 )
     {
-        v->arch.hvm_svm.vmcb->guest_asid  = 1;
-        v->arch.hvm_svm.vmcb->tlb_control = 1;
+        curr->arch.hvm_svm.vmcb->guest_asid  = 1;
+        curr->arch.hvm_svm.vmcb->tlb_control = 1;
         return;
     }
 
-    /* Test if VCPU has valid ASID. */
-    if ( likely(v->arch.hvm_svm.asid_generation ==
-                data->core_asid_generation) )
-    {
-        /* May revert previous TLB-flush command. */
-        v->arch.hvm_svm.vmcb->tlb_control = 0;
-        return;
-    }
-
-    /* If there are no free ASIDs, need to go to a new generation */
-    if ( unlikely(data->next_asid > data->max_asid) )
-        svm_asid_inc_generation();
-
-    /* Now guaranteed to be a free ASID. */
-    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
-    v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
-
-    /* When we assign ASID 1, flush all TLB entries.  We need to do it 
-     * here because svm_asid_inc_generation() can be called at any time, 
-     * but the TLB flush can only happen on vmrun. */
-    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
-        v->arch.hvm_svm.vmcb->tlb_control = 1;
-    else
-        v->arch.hvm_svm.vmcb->tlb_control = 0;
-}
-
-void svm_asid_inv_asid(struct vcpu *v)
-{
-    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
+    curr->arch.hvm_svm.vmcb->guest_asid  = curr->arch.hvm_vcpu.asid;
+    curr->arch.hvm_svm.vmcb->tlb_control = need_flush;
 }
 
 /*
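
Note that the sense of the CPU check is inverted relative to the deleted
code: svm_asid_init() now names the parts on which ASIDs are safe to use,
and on anything else nasids stays 0, so hvm_asid_init() leaves ASIDs
disabled. Restated as a standalone predicate (hypothetical helper, same
family/model/stepping test as the hunk above):

    /* ASIDs avoid erratum #170 on family 0x10, and on family 0xf from
     * model 0x68, stepping 1 onwards. */
    static int svm_asids_usable(unsigned int family, unsigned int model,
                                unsigned int stepping)
    {
        return (family == 0x10) ||
               ((family == 0xf) && (model >= 0x68) && (stepping >= 1));
    }
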
diff -r 3122518646d3 -r 2d92ad3ef517 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Dec 08 10:33:08 2009 +0000
@@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct v
         break;
     case 3:
         vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
-        svm_asid_inv_asid(v);
+        hvm_asid_invalidate_asid(v);
         break;
     case 4:
         vmcb->cr4 = HVM_CR4_HOST_MASK;
@@ -460,7 +460,7 @@ static void svm_flush_guest_tlbs(void)
     /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
      * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
      * VMRUN anyway). */
-    svm_asid_inc_generation();
+    hvm_asid_flush_core();
 }
 
 static void svm_sync_vmcb(struct vcpu *v)
@@ -704,7 +704,7 @@ static void svm_do_resume(struct vcpu *v
         hvm_migrate_timers(v);
 
         /* Migrating to another ASID domain.  Request a new ASID. */
-        svm_asid_init_vcpu(v);
+        hvm_asid_invalidate_asid(v);
     }
 
     /* Reflect the vlapic's TPR in the hardware vtpr */
diff -r 3122518646d3 -r 2d92ad3ef517 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Dec 08 10:33:08 2009 +0000
@@ -115,7 +115,7 @@ static int construct_vmcb(struct vcpu *v
     struct vmcb_struct *vmcb = arch_svm->vmcb;
 
     /* TLB control, and ASID assigment. */
-    svm_asid_init_vcpu(v);
+    hvm_asid_invalidate_asid(v);
 
     vmcb->general1_intercepts = 
         GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
diff -r 3122518646d3 -r 2d92ad3ef517 xen/include/asm-x86/hvm/asid.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/hvm/asid.h    Tue Dec 08 10:33:08 2009 +0000
@@ -0,0 +1,50 @@
+/*
+ * asid.h: ASID management
+ * Copyright (c) 2007, Advanced Micro Devices, Inc.
+ * Copyright (c) 2009, Citrix Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ASM_X86_HVM_ASID_H__
+#define __ASM_X86_HVM_ASID_H__
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <asm/processor.h>
+
+/* Initialise ASID management for the current physical CPU. */
+void hvm_asid_init(int nasids);
+
+/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
+void hvm_asid_invalidate_asid(struct vcpu *v);
+
+/* Flush all ASIDs on this processor core. */
+void hvm_asid_flush_core(void);
+
+/* Called before entry to guest context. Checks ASID allocation and returns a
+ * boolean indicating whether all ASIDs must be flushed. */
+bool_t hvm_asid_handle_vmenter(void);
+
+#endif /* __ASM_X86_HVM_ASID_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 3122518646d3 -r 2d92ad3ef517 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h        Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/asid.h        Tue Dec 08 10:33:08 2009 +0000
@@ -22,15 +22,13 @@
 #include <xen/config.h>
 #include <asm/types.h>
 #include <asm/hvm/hvm.h>
+#include <asm/hvm/asid.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/vmcb.h>
 #include <asm/percpu.h>
 
 void svm_asid_init(struct cpuinfo_x86 *c);
-void svm_asid_init_vcpu(struct vcpu *v);
-void svm_asid_inv_asid(struct vcpu *v);
-void svm_asid_inc_generation(void);
 
 static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
 {
@@ -43,7 +41,7 @@ static inline void svm_asid_g_invlpg(str
 #endif
 
     /* Safe fallback. Take a new ASID. */
-    svm_asid_inv_asid(v);
+    hvm_asid_invalidate_asid(v);
 }
 
 #endif /* __ASM_X86_HVM_SVM_ASID_H__ */
diff -r 3122518646d3 -r 2d92ad3ef517 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Dec 08 10:33:08 2009 +0000
@@ -457,7 +457,6 @@ struct arch_svm_struct {
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
     u64    vmcb_pa;
-    u64    asid_generation; /* ASID tracking, moved here for cache locality. */
     unsigned long *msrpm;
     int    launch_core;
     bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
diff -r 3122518646d3 -r 2d92ad3ef517 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Tue Dec 08 07:55:21 2009 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h    Tue Dec 08 10:33:08 2009 +0000
@@ -70,6 +70,9 @@ struct hvm_vcpu {
     bool_t              debug_state_latch;
     bool_t              single_step;
 
+    u64                 asid_generation;
+    u32                 asid;
+
     union {
         struct arch_vmx_struct vmx;
         struct arch_svm_struct svm;
