
[Xen-devel] [PATCH 2 of 3] PoD: Allow pod_set_cache_target hypercall to be preempted



# HG changeset patch
# User George Dunlap <george.dunlap@xxxxxxxxxxxxx>
# Date 1295274253 0
# Node ID 55e123a24da84f3b83caa7a7332699df73aaa90d
# Parent  366d675630fd6ecbd6228426b3f7723d8a9dd944
PoD: Allow pod_set_cache_target hypercall to be preempted

For very large VMs, setting the cache target can take long enough that
dom0 complains of soft lockups.  Allow the hypercall to be preempted.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
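
For reference, the change follows the usual Xen preemption pattern: the long-running
worker periodically calls hypercall_preempt_check() and bails out with -EAGAIN, and the
hypercall entry point turns that into a continuation so the guest re-issues the same
memory_op and the work resumes where it stopped.  Below is a minimal sketch of that
pattern, not the patch itself: do_long_work() and do_one_item() are hypothetical names
for illustration; only hypercall_preempt_check(), hypercall_create_continuation() and
__HYPERVISOR_memory_op are the real interfaces this patch relies on.

    /* Hypothetical worker: handle one item per iteration, yielding early
     * if other work (softirqs, pending interrupts) is waiting. */
    static int do_long_work(struct domain *d, unsigned long nr_items)
    {
        int ret = 0;

        while ( nr_items-- )
        {
            do_one_item(d);                   /* hypothetical per-item step */

            if ( hypercall_preempt_check() )  /* is preemption pending? */
            {
                ret = -EAGAIN;                /* ask the caller to continue later */
                break;
            }
        }

        return ret;
    }

    /* Hypothetical hypercall entry point: on -EAGAIN, arrange for the guest
     * to re-execute the same memory_op so the loop picks up where it left off. */
    static long do_example_memory_op(unsigned long op, XEN_GUEST_HANDLE(void) arg,
                                     struct domain *d, unsigned long nr_items)
    {
        long rc = do_long_work(d, nr_items);

        if ( rc == -EAGAIN )
            rc = hypercall_create_continuation(__HYPERVISOR_memory_op,
                                               "lh", op, arg);

        return rc;
    }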

diff -r 366d675630fd -r 55e123a24da8 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Jan 17 14:24:10 2011 +0000
+++ b/xen/arch/x86/domain.c     Mon Jan 17 14:24:13 2011 +0000
@@ -1653,8 +1653,8 @@
     unsigned long nval = 0;
     va_list args;
 
-    BUG_ON(*id > 5);
-    BUG_ON(mask & (1U << *id));
+    BUG_ON(id && *id > 5);
+    BUG_ON(id && (mask & (1U << *id)));
 
     va_start(args, mask);
 
diff -r 366d675630fd -r 55e123a24da8 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Jan 17 14:24:10 2011 +0000
+++ b/xen/arch/x86/mm.c Mon Jan 17 14:24:13 2011 +0000
@@ -4799,15 +4799,23 @@
             rc = p2m_pod_set_mem_target(d, target.target_pages);
         }
 
-        p2m = p2m_get_hostp2m(d);
-        target.tot_pages       = d->tot_pages;
-        target.pod_cache_pages = p2m->pod.count;
-        target.pod_entries     = p2m->pod.entry_count;
-
-        if ( copy_to_guest(arg, &target, 1) )
+        if ( rc == -EAGAIN )
         {
-            rc= -EFAULT;
-            goto pod_target_out_unlock;
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "lh", op, arg);
+        }
+        else if ( rc >= 0 )
+        {
+            p2m = p2m_get_hostp2m(d);
+            target.tot_pages       = d->tot_pages;
+            target.pod_cache_pages = p2m->pod.count;
+            target.pod_entries     = p2m->pod.entry_count;
+
+            if ( copy_to_guest(arg, &target, 1) )
+            {
+                rc= -EFAULT;
+                goto pod_target_out_unlock;
+            }
         }
         
     pod_target_out_unlock:
diff -r 366d675630fd -r 55e123a24da8 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Mon Jan 17 14:24:10 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Mon Jan 17 14:24:13 2011 +0000
@@ -435,7 +435,7 @@
 
 /* Set the size of the cache, allocating or freeing as necessary. */
 static int
-p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target)
+p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int preemptible)
 {
     struct domain *d = p2m->domain;
     int ret = 0;
@@ -468,6 +468,12 @@
         }
 
         p2m_pod_cache_add(p2m, page, order);
+
+        if ( hypercall_preempt_check() && preemptible )
+        {
+            ret = -EAGAIN;
+            goto out;
+        }
     }
 
     /* Decreasing the target */
@@ -512,6 +518,12 @@
                 put_page(page+i);
 
             put_page(page+i);
+
+            if ( hypercall_preempt_check() && preemptible )
+            {
+                ret = -EAGAIN;
+                goto out;
+            }
         }
     }
 
@@ -589,7 +601,7 @@
 
     ASSERT( pod_target >= p2m->pod.count );
 
-    ret = p2m_pod_set_cache_target(p2m, pod_target);
+    ret = p2m_pod_set_cache_target(p2m, pod_target, 1/*preemptible*/);
 
 out:
     p2m_unlock(p2m);
@@ -753,7 +765,7 @@
     /* If we've reduced our "liabilities" beyond our "assets", free some */
     if ( p2m->pod.entry_count < p2m->pod.count )
     {
-        p2m_pod_set_cache_target(p2m, p2m->pod.entry_count);
+        p2m_pod_set_cache_target(p2m, p2m->pod.entry_count, 0/*can't preempt*/);
     }
 
 out_unlock:
diff -r 366d675630fd -r 55e123a24da8 xen/arch/x86/x86_64/compat/mm.c
--- a/xen/arch/x86/x86_64/compat/mm.c   Mon Jan 17 14:24:10 2011 +0000
+++ b/xen/arch/x86/x86_64/compat/mm.c   Mon Jan 17 14:24:13 2011 +0000
@@ -127,6 +127,9 @@
         if ( rc < 0 )
             break;
 
+        if ( rc == __HYPERVISOR_memory_op )
+            hypercall_xlat_continuation(NULL, 0x2, nat, arg);
+
         XLAT_pod_target(&cmp, nat);
 
         if ( copy_to_guest(arg, &cmp, 1) )
