[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] hvm: Fix the flush-all-tlbs hypercall.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191488237 -3600
# Node ID 6903e3f3bdd6831b6680b606ec3bc95dc1f66f9b
# Parent  4881f984e06ecb481ab49e4c37046d387053535b
hvm: Fix the flush-all-tlbs hypercall.
From: Peter Johnston <pjohnston@xxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c  |   32 ++++++++++++++++++++++++++++++--
 xen/common/domain.c     |    1 +
 xen/include/xen/sched.h |    6 ++++++
 3 files changed, 37 insertions(+), 2 deletions(-)

diff -r 4881f984e06e -r 6903e3f3bdd6 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Oct 04 09:40:31 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Oct 04 09:57:17 2007 +0100
@@ -1661,14 +1661,38 @@ static int hvmop_set_pci_link_route(
 
 static int hvmop_flush_tlb_all(void)
 {
+    struct domain *d = current->domain;
     struct vcpu *v;
 
+    /* Avoid deadlock if more than one vcpu tries this at the same time. */
+    if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+        return -EAGAIN;
+
+    /* Pause all other vcpus. */
+    for_each_vcpu ( d, v )
+        if ( v != current )
+            vcpu_pause_nosync(v);
+
+    /* Now that all VCPUs are signalled to deschedule, we wait... */
+    for_each_vcpu ( d, v )
+        if ( v != current )
+            while ( !vcpu_runnable(v) && v->is_running )
+                cpu_relax();
+
+    /* All other vcpus are paused, safe to unlock now. */
+    spin_unlock(&d->hypercall_deadlock_mutex);
+
     /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
-    for_each_vcpu ( current->domain, v )
+    for_each_vcpu ( d, v )
         paging_update_cr3(v);
 
     /* Flush all dirty TLBs. */
-    flush_tlb_mask(current->domain->domain_dirty_cpumask);
+    flush_tlb_mask(d->domain_dirty_cpumask);
+
+    /* Done. */
+    for_each_vcpu ( d, v )
+        if ( v != current )
+            vcpu_unpause(v);
 
     return 0;
 }
@@ -1780,6 +1804,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
     }
     }
 
+    if ( rc == -EAGAIN )
+        rc = hypercall_create_continuation(
+            __HYPERVISOR_hvm_op, "lh", op, arg);
+
     return rc;
 }
 
diff -r 4881f984e06e -r 6903e3f3bdd6 xen/common/domain.c
--- a/xen/common/domain.c       Thu Oct 04 09:40:31 2007 +0100
+++ b/xen/common/domain.c       Thu Oct 04 09:57:17 2007 +0100
@@ -68,6 +68,7 @@ struct domain *alloc_domain(domid_t domi
     spin_lock_init(&d->big_lock);
     spin_lock_init(&d->page_alloc_lock);
     spin_lock_init(&d->shutdown_lock);
+    spin_lock_init(&d->hypercall_deadlock_mutex);
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
 
diff -r 4881f984e06e -r 6903e3f3bdd6 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Oct 04 09:40:31 2007 +0100
+++ b/xen/include/xen/sched.h   Thu Oct 04 09:57:17 2007 +0100
@@ -227,6 +227,12 @@ struct domain
     int32_t time_offset_seconds;
 
     struct rcu_head rcu;
+
+    /*
+     * Hypercall deadlock avoidance lock. Used if a hypercall might
+     * cause a deadlock. Acquirers don't spin waiting; they preempt.
+     */
+    spinlock_t hypercall_deadlock_mutex;
 };
 
 struct domain_setup_info

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with Rackspace, monitoring our
servers 24x7x365 and backed by Rackspace's Fanatical Support®.