[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] [HVM] Save/restore cleanups: don't save state for downed vcpus.



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1170089369 0
# Node ID 99d36a1530249cc4b918e83dfc5a62324f63b3bf
# Parent  5bb0840984936749b4e99f53931d72a8579d302c
[HVM] Save/restore cleanups: don't save state for downed vcpus.
(Since we wouldn't load it anyway)
Also tidy up the plumbing around the hypercalls.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/domctl.c             |   20 ++++++++----------
 xen/arch/x86/hvm/hvm.c            |   15 ++++---------
 xen/arch/x86/hvm/intercept.c      |   41 +++++++++-----------------------------
 xen/include/asm-x86/hvm/support.h |    7 +-----
 4 files changed, 26 insertions(+), 57 deletions(-)

diff -r 5bb084098493 -r 99d36a153024 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Mon Jan 29 13:22:21 2007 +0000
+++ b/xen/arch/x86/domctl.c     Mon Jan 29 16:49:29 2007 +0000
@@ -290,7 +290,6 @@ long arch_do_domctl(
     { 
         struct hvm_domain_context *c;
         struct domain             *d;
-        struct vcpu               *v;
 
         ret = -ESRCH;
         if ( (d = get_domain_by_id(domctl->domain)) == NULL )
@@ -299,15 +298,16 @@ long arch_do_domctl(
         ret = -ENOMEM;
         if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
             goto sethvmcontext_out;
-
-        v = d->vcpu[0];
         
         ret = -EFAULT;
-
         if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 )
             goto sethvmcontext_out;
 
-        ret = arch_sethvm_ctxt(v, c);
+        ret = -EINVAL;
+        if ( !is_hvm_domain(d) ) 
+            goto sethvmcontext_out;
+
+        ret = hvm_load(d, c);
 
         xfree(c);
 
@@ -321,7 +321,6 @@ long arch_do_domctl(
     { 
         struct hvm_domain_context *c;
         struct domain             *d;
-        struct vcpu               *v;
 
         ret = -ESRCH;
         if ( (d = get_domain_by_id(domctl->domain)) == NULL )
@@ -330,15 +329,14 @@ long arch_do_domctl(
         ret = -ENOMEM;
         if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
             goto gethvmcontext_out;
-
-        v = d->vcpu[0];
-
+        memset(c, 0, sizeof(*c));
+        
         ret = -ENODATA;
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+        if ( !is_hvm_domain(d) ) 
             goto gethvmcontext_out;
         
         ret = 0;
-        if (arch_gethvm_ctxt(v, c) == -1)
+        if (hvm_save(d, c) != 0)
             ret = -EFAULT;
 
         if ( copy_to_guest(domctl->u.hvmcontext.ctxt, c, 1) )
diff -r 5bb084098493 -r 99d36a153024 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Jan 29 13:22:21 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Mon Jan 29 16:49:29 2007 +0000
@@ -189,31 +189,26 @@ void hvm_domain_destroy(struct domain *d
         unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 }
 
-#define HVM_VCPU_CTXT_MAGIC 0x85963130
 void hvm_save_cpu_ctxt(hvm_domain_context_t *h, void *opaque)
 {
     struct vcpu *v = opaque;
 
-    if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) {
-        hvm_put_32u(h, 0x0);
+    /* We don't need to save state for a vcpu that is down; the restore 
+     * code will leave it down if there is nothing saved. */
+    if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) 
         return;
-    }
-
-    hvm_put_32u(h, HVM_VCPU_CTXT_MAGIC);
+
     hvm_funcs.save_cpu_ctxt(h, opaque);
 }
 
 int hvm_load_cpu_ctxt(hvm_domain_context_t *h, void *opaque, int version)
 {
     struct vcpu *v = opaque;
-
-    if ( hvm_get_32u(h) != HVM_VCPU_CTXT_MAGIC )
-        return 0;
 
     if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 )
         return -EINVAL;
 
-    /* Auxiliary processors shoudl be woken immediately. */
+    /* Auxiliary processors should be woken immediately. */
     if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
         vcpu_wake(v);
 
diff -r 5bb084098493 -r 99d36a153024 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Mon Jan 29 13:22:21 2007 +0000
+++ b/xen/arch/x86/hvm/intercept.c      Mon Jan 29 16:49:29 2007 +0000
@@ -190,20 +190,13 @@ int hvm_register_savevm(struct domain *d
     return 0;
 }
 
-int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
+int hvm_save(struct domain *d, hvm_domain_context_t *h)
 {
     uint32_t len, len_pos, cur_pos;
     uint32_t eax, ebx, ecx, edx;
     HVMStateEntry *se;
     char *chgset;
     struct hvm_save_header hdr;
-
-    if (!is_hvm_vcpu(v)) {
-        printk("hvm_save only for hvm guest!\n");
-        return -1;
-    }
-
-    memset(h, 0, sizeof(hvm_domain_context_t));
 
     hdr.magic = HVM_FILE_MAGIC;
     hdr.version = HVM_FILE_VERSION;
@@ -222,7 +215,7 @@ int hvm_save(struct vcpu *v, hvm_domain_
     hvm_put_8u(h, len);
     hvm_put_buffer(h, chgset, len);
 
-    for(se = v->domain->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+    for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) {
         /* ID string */
         len = strnlen(se->idstr, HVM_SE_IDSTR_LEN);
         hvm_put_8u(h, len);
@@ -270,7 +263,7 @@ static HVMStateEntry *find_se(struct dom
     return NULL;
 }
 
-int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
+int hvm_load(struct domain *d, hvm_domain_context_t *h)
 {
     uint32_t len, rec_len, rec_pos, instance_id, version_id;
     uint32_t eax, ebx, ecx, edx;
@@ -280,11 +273,7 @@ int hvm_load(struct vcpu *v, hvm_domain_
     char *cur_chgset;
     int ret;
     struct hvm_save_header hdr;
-
-    if (!is_hvm_vcpu(v)) {
-        printk("hvm_load only for hvm guest!\n");
-        return -1;
-    }
+    struct vcpu *v;
 
     if (h->size >= HVM_CTXT_SIZE) {
         printk("hvm_load fail! seems hvm_domain_context overflow when 
hvm_save! need %"PRId32" bytes.\n", h->size);
@@ -339,6 +328,11 @@ int hvm_load(struct vcpu *v, hvm_domain_
         printk("warnings: try to restore hvm guest when changeset is 
unavailable.\n");
 
 
+    /* Down all the vcpus: we only re-enable the ones that had state saved. */
+    for_each_vcpu(d, v) 
+        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+            vcpu_sleep_nosync(v);
+
     while(1) {
         if (hvm_ctxt_end(h)) {
             break;
@@ -362,7 +356,7 @@ int hvm_load(struct vcpu *v, hvm_domain_
         rec_len = hvm_get_32u(h);
         rec_pos = hvm_ctxt_tell(h);
 
-        se = find_se(v->domain, idstr, instance_id);
+        se = find_se(d, idstr, instance_id);
         if (se == NULL) {
             printk("warnings: hvm load can't find device %s's instance %d!\n",
                     idstr, instance_id);
@@ -384,21 +378,6 @@ int hvm_load(struct vcpu *v, hvm_domain_
     return 0;
 }
 
-int arch_gethvm_ctxt(
-    struct vcpu *v, struct hvm_domain_context *c)
-{
-    if ( !is_hvm_vcpu(v) )
-        return -1;
-
-    return hvm_save(v, c);
-
-}
-
-int arch_sethvm_ctxt(
-        struct vcpu *v, struct hvm_domain_context *c)
-{
-    return hvm_load(v, c);
-}
 
 #ifdef HVM_DEBUG_SUSPEND
 static void shpage_info(shared_iopage_t *sh)
diff -r 5bb084098493 -r 99d36a153024 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Jan 29 13:22:21 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h Mon Jan 29 16:49:29 2007 +0000
@@ -242,11 +242,8 @@ static inline void hvm_get_buffer(hvm_do
 #define hvm_get_struct(_h, _p) \
     hvm_get_buffer((_h), (char *)(_p), sizeof(*(_p)))
 
-int hvm_save(struct vcpu*, hvm_domain_context_t *h);
-int hvm_load(struct vcpu*, hvm_domain_context_t *h);
-
-int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
-int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+int hvm_save(struct domain *d, hvm_domain_context_t *h);
+int hvm_load(struct domain *d, hvm_domain_context_t *h);
 
 void shpage_init(struct domain *d, shared_iopage_t *sp);
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365 and backs them with RackSpace's Fanatical Support®.