
[Xen-changelog] [xen-unstable] EPT: More efficient ept_sync_domain().



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1253607505 -3600
# Node ID 5094980f9e167fddf6f714fd73d6a248f1f4cd9a
# Parent  8c4685fc198ef4b5ea8accf30cb0b6b828cef54f
EPT: More efficient ept_sync_domain().

Rather than always flushing all CPUs, only flush CPUs this domain is
currently active on, and defer flushing other CPUs until this domain
is scheduled onto them (or the domain is destroyed).

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/domain.c              |   13 +++++++++----
 xen/arch/x86/hvm/vmx/vmx.c         |   34 ++++++++++++++++++++++++++++------
 xen/include/asm-x86/hvm/vmx/vmcs.h |    1 +
 3 files changed, 38 insertions(+), 10 deletions(-)
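
[Editor's note] In outline, the scheme this patch implements is: keep a
per-domain mask of CPUs whose EPT TLB entries are already in sync, flush
the CPUs the domain is currently active on immediately, and let every
other CPU flush itself the next time it switches the domain in. Below is
a simplified, self-contained sketch of that idea, using C11 atomics and
hypothetical helper names (ipi_flush_on, invept_local, sync_domain,
switch_in), not the actual Xen primitives, and assuming at most 64 CPUs:

    #include <stdatomic.h>

    typedef unsigned long cpumask_t;          /* one bit per CPU, <= 64 CPUs */

    struct ept_domain {
        cpumask_t dirty_cpumask;              /* CPUs the domain is active on */
        _Atomic cpumask_t ept_synced;         /* CPUs already flushed         */
    };

    /* Hypothetical stand-ins for on_selected_cpus() / __invept(). */
    void ipi_flush_on(cpumask_t cpus, struct ept_domain *d);
    void invept_local(struct ept_domain *d);

    /* Analogue of ept_sync_domain(): flush active CPUs now, defer the rest. */
    void sync_domain(struct ept_domain *d)
    {
        /* Snapshot the dirty mask: only those CPUs are flushed eagerly. */
        atomic_store(&d->ept_synced, d->dirty_cpumask);
        ipi_flush_on(d->dirty_cpumask, d);
    }

    /* Context-switch-in hook: every other CPU flushes itself lazily here. */
    void switch_in(struct ept_domain *d, unsigned int cpu)
    {
        cpumask_t bit = 1UL << cpu;

        /* Plain read first; atomic RMW only if the bit looks clear. */
        if ( !(atomic_load(&d->ept_synced) & bit) &&
             !(atomic_fetch_or(&d->ept_synced, bit) & bit) )
            invept_local(d);
    }

Note that the real patch hands the live ept_synced mask itself to
on_selected_cpus() and, per its comment, tolerates other CPUs setting
bits in it concurrently (an unnecessary extra flush at worst), to avoid
allocating a cpumask_t on the stack.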

diff -r 8c4685fc198e -r 5094980f9e16 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Sep 22 08:37:32 2009 +0100
+++ b/xen/arch/x86/domain.c     Tue Sep 22 09:18:25 2009 +0100
@@ -1328,6 +1328,15 @@ static void __context_switch(void)
         p->arch.ctxt_switch_from(p);
     }
 
+    /*
+     * Mark this CPU in next domain's dirty cpumasks before calling
+     * ctxt_switch_to(). This avoids a race on things like EPT flushing,
+     * which is synchronised on that function.
+     */
+    if ( p->domain != n->domain )
+        cpu_set(cpu, n->domain->domain_dirty_cpumask);
+    cpu_set(cpu, n->vcpu_dirty_cpumask);
+
     if ( !is_idle_vcpu(n) )
     {
         memcpy(stack_regs,
@@ -1335,10 +1344,6 @@ static void __context_switch(void)
                CTXT_SWITCH_STACK_BYTES);
         n->arch.ctxt_switch_to(n);
     }
-
-    if ( p->domain != n->domain )
-        cpu_set(cpu, n->domain->domain_dirty_cpumask);
-    cpu_set(cpu, n->vcpu_dirty_cpumask);
 
     gdt = !is_pv_32on64_vcpu(n) ? per_cpu(gdt_table, cpu) :
                                   per_cpu(compat_gdt_table, cpu);
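
[Editor's note] The reason the dirty-cpumask update moves above
ctxt_switch_to() in the hunk above: ept_sync_domain() (in the vmx.c
changes below) snapshots domain_dirty_cpumask to decide which CPUs to
IPI. If a CPU switched the domain in, flushed via vmx_ctxt_switch_to(),
and only afterwards set its dirty bit, a concurrent flush request could
overwrite ept_synced without that CPU's bit and IPI everyone except it,
leaving it on stale EPT entries until its next context switch. A minimal
sketch of the corrected ordering, with simplified hypothetical types
rather than the real Xen structures:

    #include <stdatomic.h>

    struct domain { _Atomic unsigned long domain_dirty_cpumask; };
    struct vcpu {
        struct domain *domain;
        void (*ctxt_switch_to)(struct vcpu *v);  /* e.g. vmx_ctxt_switch_to */
    };

    static void context_switch_in(struct vcpu *n, unsigned int cpu)
    {
        /* Publish this CPU in the dirty mask first, so any flush
           request issued from here on will include this CPU in its
           IPI set rather than relying on the lazy flush alone... */
        atomic_fetch_or(&n->domain->domain_dirty_cpumask, 1UL << cpu);

        /* ...and only then run the hook that may mark this CPU as
           already EPT-synced. */
        n->ctxt_switch_to(n);
    }
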
diff -r 8c4685fc198e -r 5094980f9e16 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Sep 22 08:37:32 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Sep 22 09:18:25 2009 +0100
@@ -72,6 +72,7 @@ static int vmx_msr_read_intercept(struct
 static int vmx_msr_read_intercept(struct cpu_user_regs *regs);
 static int vmx_msr_write_intercept(struct cpu_user_regs *regs);
 static void vmx_invlpg_intercept(unsigned long vaddr);
+static void __ept_sync_domain(void *info);
 
 static int vmx_domain_initialise(struct domain *d)
 {
@@ -91,7 +92,8 @@ static int vmx_domain_initialise(struct 
 
 static void vmx_domain_destroy(struct domain *d)
 {
-    ept_sync_domain(d);
+    if ( d->arch.hvm_domain.hap_enabled )
+        on_each_cpu(__ept_sync_domain, d, 1);
     vmx_free_vlapic_mapping(d);
 }
 
@@ -666,9 +668,20 @@ static void vmx_ctxt_switch_from(struct 
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
 {
+    struct domain *d = v->domain;
+
     /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
     if ( unlikely(read_cr4() != mmu_cr4_features) )
         write_cr4(mmu_cr4_features);
+
+    if ( d->arch.hvm_domain.hap_enabled )
+    {
+        unsigned int cpu = smp_processor_id();
+        /* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
+        if ( !cpu_isset(cpu, d->arch.hvm_domain.vmx.ept_synced) &&
+             !cpu_test_and_set(cpu, d->arch.hvm_domain.vmx.ept_synced) )
+            __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
+    }
 
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
@@ -1216,11 +1229,20 @@ void ept_sync_domain(struct domain *d)
 void ept_sync_domain(struct domain *d)
 {
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( d->arch.hvm_domain.hap_enabled && d->vcpu && d->vcpu[0] )
-    {
-        ASSERT(local_irq_is_enabled());
-        on_each_cpu(__ept_sync_domain, d, 1);
-    }
+    if ( !d->arch.hvm_domain.hap_enabled || !d->vcpu || !d->vcpu[0] )
+        return;
+
+    ASSERT(local_irq_is_enabled());
+
+    /*
+     * Flush active cpus synchronously. Flush others the next time this domain
+     * is scheduled onto them. We accept the race of other CPUs adding to
+     * the ept_synced mask before on_selected_cpus() reads it, resulting in
+     * unnecessary extra flushes, to avoid allocating a cpumask_t on the stack.
+     */
+    d->arch.hvm_domain.vmx.ept_synced = d->domain_dirty_cpumask;
+    on_selected_cpus(&d->arch.hvm_domain.vmx.ept_synced,
+                     __ept_sync_domain, d, 1);
 }
 
 static void __vmx_inject_exception(int trap, int type, int error_code)
diff -r 8c4685fc198e -r 5094980f9e16 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Sep 22 08:37:32 2009 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Sep 22 09:18:25 2009 +0100
@@ -67,6 +67,7 @@ struct vmx_domain {
         };
         u64 eptp;
     } ept_control;
+    cpumask_t ept_synced;
 };
 
 struct arch_vmx_struct {

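[Editor's note] The flush-avoidance check in vmx_ctxt_switch_to() is a
test-and-test-and-set: the plain cpu_isset() read is tried before the
locked cpu_test_and_set(), so on the common path (bit already set) the
cache line is merely read rather than bounced into exclusive state by an
atomic read-modify-write on every context switch. A standalone C11
illustration of the idiom, using a hypothetical claim() helper rather
than the Xen cpumask API:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long synced;

    /* Returns 1 iff this caller won the right to do the flush. */
    static int claim(unsigned int cpu)
    {
        unsigned long bit = 1UL << cpu;

        if (atomic_load(&synced) & bit)     /* cheap shared read */
            return 0;
        /* Bit looked clear: try to claim it with one atomic RMW. */
        return !(atomic_fetch_or(&synced, bit) & bit);
    }

    int main(void)
    {
        printf("%d\n", claim(3));   /* 1: bit was clear, caller flushes */
        printf("%d\n", claim(3));   /* 0: already set, nothing to do    */
        return 0;
    }

The corresponding comment in ept_sync_domain() documents the other side
of the trade-off: a CPU that has just flushed itself and set its bit may
still receive an IPI, because the mask is read by on_selected_cpus()
after the assignment; that occasional redundant flush is accepted as the
price of not allocating a cpumask_t on the stack.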